Diffstat (limited to 'drivers')
-rw-r--r--drivers/bcma/driver_gpio.c8
-rw-r--r--drivers/bcma/main.c4
-rw-r--r--drivers/bcma/sprom.c2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c4
-rw-r--r--drivers/hv/hv_util.c4
-rw-r--r--drivers/i2c/i2c-core-base.c14
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c14
-rw-r--r--drivers/net/bonding/bond_3ad.c9
-rw-r--r--drivers/net/bonding/bond_main.c26
-rw-r--r--drivers/net/bonding/bond_options.c18
-rw-r--r--drivers/net/can/Kconfig8
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/c_can/Kconfig3
-rw-r--r--drivers/net/can/m_can/m_can.c26
-rw-r--r--drivers/net/can/m_can/m_can.h2
-rw-r--r--drivers/net/can/pch_can.c1249
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c22
-rw-r--r--drivers/net/can/usb/Kconfig9
-rw-r--r--drivers/net/can/usb/gs_usb.c74
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile5
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h30
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c115
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c160
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c464
-rw-r--r--drivers/net/can/usb/ucan.c5
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/b53/Kconfig1
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c5
-rw-r--r--drivers/net/dsa/lan9303_i2c.c5
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz8.h1
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c75
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h3
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c9
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c24
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h1
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c17
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h2
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c150
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h17
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c10
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c9
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h2
-rw-r--r--drivers/net/dsa/ocelot/felix.c17
-rw-r--r--drivers/net/dsa/ocelot/felix.h4
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c35
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c32
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c12
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c5
-rw-r--r--drivers/net/dummy.c7
-rw-r--r--drivers/net/ethernet/Kconfig10
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/adin1110.c58
-rw-r--r--drivers/net/ethernet/alacritech/slic.h12
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c12
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c20
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c8
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c57
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c23
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c86
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c134
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h281
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h7
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c31
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c19
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h60
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h27
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c17
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c13
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c26
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h23
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c11
-rw-r--r--drivers/net/ethernet/cortina/gemini.c24
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c16
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h8
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c165
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h7
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c245
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c65
-rw-r--r--drivers/net/ethernet/fealnx.c1953
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig4
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c89
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c90
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c57
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c22
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h142
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c609
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h112
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c128
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c22
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h10
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c45
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c60
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c454
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h19
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.h20
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c397
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c177
-rw-r--r--drivers/net/ethernet/freescale/fman/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c457
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_mac.h10
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c744
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c130
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c168
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h23
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_devlink.c7
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c13
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h27
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c21
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h51
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h5
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c18
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c64
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c544
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c20
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c30
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h170
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c13
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c50
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c18
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c239
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h5
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000e_trace.h42
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h9
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c51
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c17
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c27
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c634
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c272
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c126
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c104
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c351
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c195
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c15
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c30
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c25
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c49
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c13
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c16
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c20
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c78
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c262
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h36
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c151
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c34
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c52
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c25
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c22
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c21
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c119
-rw-r--r--drivers/net/ethernet/marvell/sky2.c8
-rw-r--r--drivers/net/ethernet/mediatek/Makefile2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c504
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h59
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c22
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c12
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c174
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c849
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h21
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c87
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c387
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h140
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c512
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h282
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c171
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c437
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c251
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h2
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c159
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h71
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c54
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c433
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_goto.c54
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c42
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h95
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c236
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h196
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c254
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c1608
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c549
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c140
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c310
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h20
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h583
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c99
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h42
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c51
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h19
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c1016
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c97
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c1351
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h18
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c200
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h33
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c723
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h81
-rw-r--r--drivers/net/ethernet/microchip/vcap/Kconfig53
-rw-r--r--drivers/net/ethernet/microchip/vcap/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h735
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c2883
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h280
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h265
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c431
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h41
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c555
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c2245
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h113
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c5570
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h10
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig1
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma.h692
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c70
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c6
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.h195
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h634
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c185
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c10
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.h21
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c6
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c12
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c244
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c14
-rw-r--r--drivers/net/ethernet/netronome/Kconfig11
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/crypto.h23
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c587
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c52
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h21
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c53
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h37
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c81
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h56
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c26
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h45
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c113
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c13
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c9
-rw-r--r--drivers/net/ethernet/renesas/Kconfig12
-rw-r--r--drivers/net/ethernet/renesas/Makefile4
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c17
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c181
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h72
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c1841
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h973
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c15
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c23
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c3
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c9
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c37
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/mae.c306
-rw-r--r--drivers/net/ethernet/sfc/mae.h7
-rw-r--r--drivers/net/ethernet/sfc/mae_counter_format.h73
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h17
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h19
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/tc.c302
-rw-r--r--drivers/net/ethernet/sfc/tc.h48
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c503
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.h59
-rw-r--r--drivers/net/ethernet/sfc/tx.c4
-rw-r--r--drivers/net/ethernet/smsc/Kconfig14
-rw-r--r--drivers/net/ethernet/smsc/Makefile1
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2198
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h901
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c388
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c23
-rw-r--r--drivers/net/ethernet/sun/cassini.c48
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c255
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h6
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c81
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h10
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h1
-rw-r--r--drivers/net/ethernet/ti/cpts.c20
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c8
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig7
-rw-r--r--drivers/net/ethernet/wangxun/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile7
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c936
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h28
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h352
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe.h55
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c87
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h12
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c368
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h99
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe.h23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c312
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h11
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c465
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h47
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c45
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c79
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c19
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c32
-rw-r--r--drivers/net/ieee802154/atusb.c33
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c179
-rw-r--r--drivers/net/ieee802154/mcr20a.c9
-rw-r--r--drivers/net/ifb.c12
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c19
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c27
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c17
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c17
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c17
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c17
-rw-r--r--drivers/net/ipa/gsi_trans.c7
-rw-r--r--drivers/net/ipa/ipa.h32
-rw-r--r--drivers/net/ipa/ipa_cmd.c74
-rw-r--r--drivers/net/ipa/ipa_cmd.h16
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_endpoint.c277
-rw-r--r--drivers/net/ipa/ipa_endpoint.h2
-rw-r--r--drivers/net/ipa/ipa_interrupt.c34
-rw-r--r--drivers/net/ipa/ipa_main.c108
-rw-r--r--drivers/net/ipa/ipa_mem.c19
-rw-r--r--drivers/net/ipa/ipa_qmi.c9
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c20
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h20
-rw-r--r--drivers/net/ipa/ipa_sysfs.c6
-rw-r--r--drivers/net/ipa/ipa_table.c350
-rw-r--r--drivers/net/ipa/ipa_table.h30
-rw-r--r--drivers/net/ipa/ipa_version.h3
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c13
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c13
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c13
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c13
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c13
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c13
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/macsec.c12
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/mhi_net.c8
-rw-r--r--drivers/net/netdevsim/dev.c7
-rw-r--r--drivers/net/netdevsim/netdev.c14
-rw-r--r--drivers/net/pcs/pcs-altera-tse.c21
-rw-r--r--drivers/net/pcs/pcs-xpcs.c10
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/aquantia_main.c40
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/dp83867.c7
-rw-r--r--drivers/net/phy/micrel.c77
-rw-r--r--drivers/net/phy/motorcomm.c1677
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c57
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.h2
-rw-r--r--drivers/net/phy/mxl-gpy.c106
-rw-r--r--drivers/net/phy/phy-core.c11
-rw-r--r--drivers/net/phy/phy.c1
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/phylink.c43
-rw-r--r--drivers/net/phy/sfp.c162
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/team/team_mode_loadbalance.c4
-rw-r--r--drivers/net/thunderbolt.c26
-rw-r--r--drivers/net/tun.c67
-rw-r--r--drivers/net/usb/asix_devices.c23
-rw-r--r--drivers/net/usb/cdc_ether.c6
-rw-r--r--drivers/net/usb/cdc_ncm.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c5
-rw-r--r--drivers/net/veth.c14
-rw-r--r--drivers/net/virtio_net.c16
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c4
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c4
-rw-r--r--drivers/net/wireless/admtek/adm8211.c1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c37
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c126
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h102
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c232
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c87
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/Makefile5
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h4
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c36
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c1
-rw-r--r--drivers/net/wireless/atmel/atmel.c162
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c11
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c587
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c139
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c1
-rw-r--r--drivers/net/wireless/cisco/airo.c204
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c16
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c302
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c212
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c125
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c85
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c244
-rw-r--r--drivers/net/wireless/intersil/orinoco/wext.c131
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.h4
-rw-r--r--drivers/net/wireless/intersil/p54/main.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c246
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c214
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c410
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h136
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c307
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c207
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c135
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c635
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c142
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c495
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c414
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c106
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c71
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c233
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c59
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c851
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c360
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c229
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h75
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c823
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c2498
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h398
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c1334
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c3607
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h669
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c386
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h523
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c222
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h542
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c1
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig7
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h95
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c1766
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c73
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c88
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c114
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c93
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c584
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig14
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile11
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c96
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h238
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c1052
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c732
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h709
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c706
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h117
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c67
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c41
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c358
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h87
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h449
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c179
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2445
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.h137
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c4174
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.c794
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.h62
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c22877
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.h30
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c64
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c232
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c988
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c859
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h21
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c1
-rw-r--r--drivers/net/wireless/st/cw1200/main.c1
-rw-r--r--drivers/net/wireless/ti/Kconfig8
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c8
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c76
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/net/wireless/zydas/zd1201.c174
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c1
-rw-r--r--drivers/net/wwan/Kconfig1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c2
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c8
-rw-r--r--drivers/net/wwan/t7xx/Makefile3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c218
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c91
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.h5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c12
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c116
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c16
-rw-r--r--drivers/net/wwan/wwan_core.c6
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/nfc/microread/i2c.c5
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c5
-rw-r--r--drivers/nfc/nxp-nci/i2c.c5
-rw-r--r--drivers/nfc/pn533/i2c.c5
-rw-r--r--drivers/nfc/pn544/i2c.c5
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c24
-rw-r--r--drivers/nfc/st-nci/i2c.c5
-rw-r--r--drivers/nfc/st21nfca/i2c.c5
-rw-r--r--drivers/nfc/virtual_ncidev.c147
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/ptp/ptp_dte.c5
-rw-r--r--drivers/ptp/ptp_idt82p33.c709
-rw-r--r--drivers/ptp/ptp_idt82p33.h21
-rw-r--r--drivers/ptp/ptp_kvm_common.c4
-rw-r--r--drivers/ptp/ptp_ocp.c567
-rw-r--r--drivers/ptp/ptp_pch.c19
-rw-r--r--drivers/ptp/ptp_vmw.c4
-rw-r--r--drivers/s390/net/ctcm_main.c11
-rw-r--r--drivers/s390/net/lcs.c8
-rw-r--r--drivers/s390/net/netiucv.c9
-rw-r--r--drivers/staging/vt6655/device_main.c1
-rw-r--r--drivers/staging/vt6656/main_usb.c1
927 files changed, 109057 insertions, 17928 deletions
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 65fb9bad1577..5f90bac6bb09 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -82,6 +82,7 @@ static void bcma_gpio_irq_unmask(struct irq_data *d)
int gpio = irqd_to_hwirq(d);
u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
+ gpiochip_enable_irq(gc, gpio);
bcma_chipco_gpio_polarity(cc, BIT(gpio), val);
bcma_chipco_gpio_intmask(cc, BIT(gpio), BIT(gpio));
}
@@ -93,12 +94,15 @@ static void bcma_gpio_irq_mask(struct irq_data *d)
int gpio = irqd_to_hwirq(d);
bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
+ gpiochip_disable_irq(gc, gpio);
}
-static struct irq_chip bcma_gpio_irq_chip = {
+static const struct irq_chip bcma_gpio_irq_chip = {
.name = "BCMA-GPIO",
.irq_mask = bcma_gpio_irq_mask,
.irq_unmask = bcma_gpio_irq_unmask,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
@@ -139,7 +143,7 @@ static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
bcma_chipco_gpio_intmask(cc, ~0, 0);
bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
- girq->chip = &bcma_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &bcma_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 44392b624b20..0a8469e0b13a 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -344,8 +344,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
err = bcma_gpio_init(&bus->drv_cc);
if (err == -ENOTSUPP)
bcma_debug(bus, "GPIO driver not activated\n");
- else if (err)
+ else if (err) {
bcma_err(bus, "Error registering GPIO driver: %i\n", err);
+ return err;
+ }
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
err = bcma_chipco_watchdog_register(&bus->drv_cc);
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 3da01f173c63..e668ad7963fc 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -165,7 +165,7 @@ static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom,
return err;
revision = sprom[words - 1] & SSB_SPROM_REVISION_REV;
- if (revision != 8 && revision != 9 && revision != 10) {
+ if (revision < 8 || revision > 11) {
pr_err("Unsupported SPROM revision: %d\n", revision);
return -ENOENT;
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index 7503f6b18ac5..a2aba0b0d68a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -76,10 +76,6 @@ static int otx2_cpt_devlink_info_get(struct devlink *dl,
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
int err;
- err = devlink_info_driver_name_put(req, "rvu_cptpf");
- if (err)
- return err;
-
err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp,
"fw.ae", OTX2_CPT_AE_TYPES);
if (err)
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 835e6039c186..d776074b49cb 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -706,7 +706,7 @@ static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
return -EOPNOTSUPP;
}
-static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int hv_ptp_adjfine(struct ptp_clock_info *ptp, long delta)
{
return -EOPNOTSUPP;
}
@@ -724,7 +724,7 @@ static struct ptp_clock_info ptp_hyperv_info = {
.name = "hyperv",
.enable = hv_ptp_enable,
.adjtime = hv_ptp_adjtime,
- .adjfreq = hv_ptp_adjfreq,
+ .adjfine = hv_ptp_adjfine,
.gettime64 = hv_ptp_gettime,
.settime64 = hv_ptp_settime,
.owner = THIS_MODULE,
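The hunk above migrates the Hyper-V PTP stub from the legacy .adjfreq callback (a plain ppb offset passed as s32) to .adjfine, which receives the requested frequency offset as scaled parts-per-million in a long with a 16-bit fractional part. The stub still just returns -EOPNOTSUPP; for reference only, a minimal sketch of a non-stub .adjfine callback is shown below, where foo_clock and foo_hw_write_freq_adj() are hypothetical names and scaled_ppm_to_ppb() is the generic helper from <linux/ptp_clock_kernel.h>:

/* Sketch only: convert the 16.16 scaled-ppm request to ppb and program it. */
static int foo_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct foo_clock *clk = container_of(ptp, struct foo_clock, ptp_info);
	s64 ppb = scaled_ppm_to_ppb(scaled_ppm);

	return foo_hw_write_freq_adj(clk, ppb);	/* hypothetical register write */
}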
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7539b0740351..13fafb74bab8 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2237,6 +2237,20 @@ int i2c_get_device_id(const struct i2c_client *client,
}
EXPORT_SYMBOL_GPL(i2c_get_device_id);
+/**
+ * i2c_client_get_device_id - get the driver match table entry of a device
+ * @client: the device to query. The device must be bound to a driver
+ *
+ * Returns a pointer to the matching entry if found, NULL otherwise.
+ */
+const struct i2c_device_id *i2c_client_get_device_id(const struct i2c_client *client)
+{
+ const struct i2c_driver *drv = to_i2c_driver(client->dev.driver);
+
+ return i2c_match_id(drv->id_table, client);
+}
+EXPORT_SYMBOL_GPL(i2c_client_get_device_id);
+
/* ----------------------------------------------------
* the i2c address scanning function
* Will not work for 10-bit addresses!
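The i2c_client_get_device_id() helper exported above is aimed at client drivers using the single-argument probe callback, which no longer receives the matched struct i2c_device_id directly. A hedged usage sketch follows; foo_probe(), foo_setup_variant() and the driver's foo_id_table are hypothetical names:

static int foo_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);

	/* driver_data carries the per-variant value registered in foo_id_table */
	if (id)
		foo_setup_variant(client, id->driver_data);

	return 0;
}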
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index bc97958818bb..e6e021af6aa9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -230,8 +230,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
struct ib_umem_odp *umem_odp =
container_of(mni, struct ib_umem_odp, notifier);
struct mlx5_ib_mr *mr;
- const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
- sizeof(struct mlx5_mtt)) - 1;
+ const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
u64 idx = 0, blk_start_idx = 0;
u64 invalidations = 0;
unsigned long start;
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index d5105b5c9979..029e9536ec28 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -418,7 +418,7 @@ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
}
#define MLX5_MAX_UMR_CHUNK \
- ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_MTT_ALIGNMENT)
+ ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_FLEX_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
/*
@@ -428,11 +428,11 @@ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
*/
static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
- const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
+ const size_t xlt_chunk_align = MLX5_UMR_FLEX_ALIGNMENT / ent_size;
size_t size;
void *res = NULL;
- static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
+ static_assert(PAGE_SIZE % MLX5_UMR_FLEX_ALIGNMENT == 0);
/*
* MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context just that the
@@ -666,7 +666,7 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
}
final_size = (void *)cur_mtt - (void *)mtt;
- sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
+ sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
memset(cur_mtt, 0, sg.length - final_size);
mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
@@ -690,7 +690,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
? sizeof(struct mlx5_klm)
: sizeof(struct mlx5_mtt);
- const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
+ const int page_align = MLX5_UMR_FLEX_ALIGNMENT / desc_size;
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
const int page_mask = page_align - 1;
@@ -711,7 +711,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
if (WARN_ON(!mr->umem->is_odp))
return -EINVAL;
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
+ /* UMR copies MTTs in units of MLX5_UMR_FLEX_ALIGNMENT bytes,
* so we need to align the offset and length accordingly
*/
if (idx & page_mask) {
@@ -748,7 +748,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
- sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
+ sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
if (pages_mapped + pages_iter >= pages_to_map)
mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index e58a1e0cadd2..455b555275f1 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -75,6 +75,7 @@ enum ad_link_speed_type {
AD_LINK_SPEED_100000MBPS,
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
+ AD_LINK_SPEED_800000MBPS,
};
/* compare MAC addresses */
@@ -251,6 +252,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_100000MBPS
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
+ * %AD_LINK_SPEED_800000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -326,6 +328,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_400000MBPS;
break;
+ case SPEED_800000:
+ speed = AD_LINK_SPEED_800000MBPS;
+ break;
+
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
@@ -753,6 +759,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_400000MBPS:
bandwidth = nports * 400000;
break;
+ case AD_LINK_SPEED_800000MBPS:
+ bandwidth = nports * 800000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b9a882f182d2..4048876f842c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -307,7 +307,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
return dev_queue_xmit(skb);
}
-bool bond_sk_check(struct bonding *bond)
+static bool bond_sk_check(struct bonding *bond)
{
switch (BOND_MODE(bond)) {
case BOND_MODE_8023AD:
@@ -1398,13 +1398,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
- if (bond_sk_check(bond))
- features |= BOND_TLS_FEATURES;
- else
- features &= ~BOND_TLS_FEATURES;
-#endif
-
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
@@ -2531,12 +2524,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
+ bool ignore_updelay = false;
int link_state, commit = 0;
struct list_head *iter;
struct slave *slave;
- bool ignore_updelay;
- ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
+ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ } else {
+ struct bond_up_slave *usable_slaves;
+
+ usable_slaves = rcu_dereference(bond->usable_slaves);
+
+ if (usable_slaves && usable_slaves->count == 0)
+ ignore_updelay = true;
+ }
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
@@ -5813,10 +5815,6 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
- if (bond_sk_check(bond))
- bond_dev->features |= BOND_TLS_FEATURES;
-#endif
}
/* Destroy a bonding device.
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 3498db1c1b3c..f71d5517f829 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -842,19 +842,6 @@ static bool bond_set_xfrm_features(struct bonding *bond)
return true;
}
-static bool bond_set_tls_features(struct bonding *bond)
-{
- if (!IS_ENABLED(CONFIG_TLS_DEVICE))
- return false;
-
- if (bond_sk_check(bond))
- bond->dev->wanted_features |= BOND_TLS_FEATURES;
- else
- bond->dev->wanted_features &= ~BOND_TLS_FEATURES;
-
- return true;
-}
-
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -885,7 +872,6 @@ static int bond_option_mode_set(struct bonding *bond,
bool update = false;
update |= bond_set_xfrm_features(bond);
- update |= bond_set_tls_features(bond);
if (update)
netdev_update_features(bond->dev);
@@ -1418,10 +1404,6 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
newval->string, newval->value);
bond->params.xmit_policy = newval->value;
- if (bond->dev->reg_state == NETREG_REGISTERED)
- if (bond_set_tls_features(bond))
- netdev_update_features(bond->dev);
-
return 0;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 3048ad77edb3..cd34e8dc9394 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -198,14 +198,6 @@ config CAN_XILINXCAN
Xilinx CAN driver. This driver supports both soft AXI CAN IP and
Zynq CANPS IP.
-config PCH_CAN
- tristate "Intel EG20T PCH CAN controller"
- depends on PCI && (X86_32 || COMPILE_TEST)
- help
- This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
- is an IOH for x86 embedded processor (Intel Atom E6xx series).
- This driver can access CAN bus.
-
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ctucanfd/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 61c75ce9d500..52b0f6e10668 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -30,6 +30,5 @@ obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
-obj-$(CONFIG_PCH_CAN) += pch_can.o
subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 962725788b0a..1f0e9acb69ec 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -20,5 +20,6 @@ config CAN_C_CAN_PCI
depends on PCI
help
This driver adds support for the C_CAN/D_CAN chips connected
- to the PCI bus.
+ to the PCI bus. E.g. for the C_CAN controller IP inside the
+ Intel Atom E6xx series IOH (aka EG20T 'PCH CAN').
endif
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index e5575d2755e4..0bdec28e7c85 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -156,6 +156,7 @@ enum m_can_reg {
#define PSR_EW BIT(6)
#define PSR_EP BIT(5)
#define PSR_LEC_MASK GENMASK(2, 0)
+#define PSR_DLEC_MASK GENMASK(10, 8)
/* Interrupt Register (IR) */
#define IR_ALL_INT 0xffffffff
@@ -209,7 +210,7 @@ enum m_can_reg {
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
@@ -816,11 +817,9 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
netdev_err(dev, "Message RAM access failure occurred\n");
}
-static inline bool is_lec_err(u32 psr)
+static inline bool is_lec_err(u8 lec)
{
- psr &= LEC_UNUSED;
-
- return psr && (psr != LEC_UNUSED);
+ return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE;
}
static inline bool m_can_is_protocol_err(u32 irqstatus)
@@ -875,9 +874,20 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
work_done += m_can_handle_lost_msg(dev);
/* handle lec errors on the bus */
- if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
- is_lec_err(psr))
- work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
+ u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);
+
+ if (is_lec_err(lec)) {
+ netdev_dbg(dev, "Arbitration phase error detected\n");
+ work_done += m_can_handle_lec_err(dev, lec);
+ }
+
+ if (is_lec_err(dlec)) {
+ netdev_dbg(dev, "Data phase error detected\n");
+ work_done += m_can_handle_lec_err(dev, dlec);
+ }
+ }
/* handle protocol errors in arbitration phase */
if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 4c0267f9f297..52563c048732 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -38,7 +38,7 @@ enum m_can_lec_type {
LEC_BIT1_ERROR,
LEC_BIT0_ERROR,
LEC_CRC_ERROR,
- LEC_UNUSED,
+ LEC_NO_CHANGE,
};
enum m_can_mram_cfg {
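
The m_can change above stops masking the whole PSR low byte and instead extracts the 3-bit arbitration-phase (LEC) and data-phase (DLEC) error codes separately, where value 0 means "no error" and 7 means "no change since last read". A standalone sketch of the same FIELD_GET() extraction (the masks are repeated here only to keep the snippet self-contained):

#include <linux/bitfield.h>
#include <linux/types.h>

#define PSR_LEC_MASK	GENMASK(2, 0)	/* arbitration phase last error code */
#define PSR_DLEC_MASK	GENMASK(10, 8)	/* data phase last error code */

static bool is_real_error(u8 code)
{
	return code != 0 /* no error */ && code != 7 /* no change */;
}

static void decode_psr(u32 psr)
{
	u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
	u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);

	if (is_real_error(lec))
		pr_debug("arbitration phase error, LEC=%u\n", lec);
	if (is_real_error(dlec))
		pr_debug("data phase error, DLEC=%u\n", dlec);
}
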
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
deleted file mode 100644
index 2a44b2803e55..000000000000
--- a/drivers/net/can/pch_can.c
+++ /dev/null
@@ -1,1249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD.
- */
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
-#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
-#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
-#define PCH_CTRL_CCE BIT(6)
-#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
-#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
-#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
-
-#define PCH_CMASK_RX_TX_SET 0x00f3
-#define PCH_CMASK_RX_TX_GET 0x0073
-#define PCH_CMASK_ALL 0xff
-#define PCH_CMASK_NEWDAT BIT(2)
-#define PCH_CMASK_CLRINTPND BIT(3)
-#define PCH_CMASK_CTRL BIT(4)
-#define PCH_CMASK_ARB BIT(5)
-#define PCH_CMASK_MASK BIT(6)
-#define PCH_CMASK_RDWR BIT(7)
-#define PCH_IF_MCONT_NEWDAT BIT(15)
-#define PCH_IF_MCONT_MSGLOST BIT(14)
-#define PCH_IF_MCONT_INTPND BIT(13)
-#define PCH_IF_MCONT_UMASK BIT(12)
-#define PCH_IF_MCONT_TXIE BIT(11)
-#define PCH_IF_MCONT_RXIE BIT(10)
-#define PCH_IF_MCONT_RMTEN BIT(9)
-#define PCH_IF_MCONT_TXRQXT BIT(8)
-#define PCH_IF_MCONT_EOB BIT(7)
-#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
-#define PCH_ID2_DIR BIT(13)
-#define PCH_ID2_XTD BIT(14)
-#define PCH_ID_MSGVAL BIT(15)
-#define PCH_IF_CREQ_BUSY BIT(15)
-
-#define PCH_STATUS_INT 0x8000
-#define PCH_RP 0x00008000
-#define PCH_REC 0x00007f00
-#define PCH_TEC 0x000000ff
-
-#define PCH_TX_OK BIT(3)
-#define PCH_RX_OK BIT(4)
-#define PCH_EPASSIV BIT(5)
-#define PCH_EWARN BIT(6)
-#define PCH_BUS_OFF BIT(7)
-
-/* bit position of certain controller bits. */
-#define PCH_BIT_BRP_SHIFT 0
-#define PCH_BIT_SJW_SHIFT 6
-#define PCH_BIT_TSEG1_SHIFT 8
-#define PCH_BIT_TSEG2_SHIFT 12
-#define PCH_BIT_BRPE_BRPE_SHIFT 6
-
-#define PCH_MSK_BITT_BRP 0x3f
-#define PCH_MSK_BRPE_BRPE 0x3c0
-#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
-#define PCH_COUNTER_LIMIT 10
-
-#define PCH_CAN_CLK 50000000 /* 50MHz */
-
-/*
- * Define the number of message object.
- * PCH CAN communications are done via Message RAM.
- * The Message RAM consists of 32 message objects.
- */
-#define PCH_RX_OBJ_NUM 26
-#define PCH_TX_OBJ_NUM 6
-#define PCH_RX_OBJ_START 1
-#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
-#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
-#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
-
-#define PCH_FIFO_THRESH 16
-
-/* TxRqst2 show status of MsgObjNo.17~32 */
-#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
- (PCH_RX_OBJ_END - 16))
-
-enum pch_ifreg {
- PCH_RX_IFREG,
- PCH_TX_IFREG,
-};
-
-enum pch_can_err {
- PCH_STUF_ERR = 1,
- PCH_FORM_ERR,
- PCH_ACK_ERR,
- PCH_BIT1_ERR,
- PCH_BIT0_ERR,
- PCH_CRC_ERR,
- PCH_LEC_ALL,
-};
-
-enum pch_can_mode {
- PCH_CAN_ENABLE,
- PCH_CAN_DISABLE,
- PCH_CAN_ALL,
- PCH_CAN_NONE,
- PCH_CAN_STOP,
- PCH_CAN_RUN,
-};
-
-struct pch_can_if_regs {
- u32 creq;
- u32 cmask;
- u32 mask1;
- u32 mask2;
- u32 id1;
- u32 id2;
- u32 mcont;
- u32 data[4];
- u32 rsv[13];
-};
-
-struct pch_can_regs {
- u32 cont;
- u32 stat;
- u32 errc;
- u32 bitt;
- u32 intr;
- u32 opt;
- u32 brpe;
- u32 reserve;
- struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
- u32 reserve1[8];
- u32 treq1;
- u32 treq2;
- u32 reserve2[6];
- u32 data1;
- u32 data2;
- u32 reserve3[6];
- u32 canipend1;
- u32 canipend2;
- u32 reserve4[6];
- u32 canmval1;
- u32 canmval2;
- u32 reserve5[37];
- u32 srst;
-};
-
-struct pch_can_priv {
- struct can_priv can;
- struct pci_dev *dev;
- u32 tx_enable[PCH_TX_OBJ_END];
- u32 rx_enable[PCH_TX_OBJ_END];
- u32 rx_link[PCH_TX_OBJ_END];
- u32 int_enables;
- struct net_device *ndev;
- struct pch_can_regs __iomem *regs;
- struct napi_struct napi;
- int tx_obj; /* Point next Tx Obj index */
- int use_msi;
-};
-
-static const struct can_bittiming_const pch_can_bittiming_const = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024, /* 6bit + extended 4bit */
- .brp_inc = 1,
-};
-
-static const struct pci_device_id pch_pci_tbl[] = {
- {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
- {0,}
-};
-MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
-
-static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
-{
- iowrite32(ioread32(addr) | mask, addr);
-}
-
-static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
-{
- iowrite32(ioread32(addr) & ~mask, addr);
-}
-
-static void pch_can_set_run_mode(struct pch_can_priv *priv,
- enum pch_can_mode mode)
-{
- switch (mode) {
- case PCH_CAN_RUN:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
- break;
-
- case PCH_CAN_STOP:
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
- break;
-
- default:
- netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
- break;
- }
-}
-
-static void pch_can_set_optmode(struct pch_can_priv *priv)
-{
- u32 reg_val = ioread32(&priv->regs->opt);
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- reg_val |= PCH_OPT_SILENT;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
- reg_val |= PCH_OPT_LBACK;
-
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
- iowrite32(reg_val, &priv->regs->opt);
-}
-
-static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
-{
- int counter = PCH_COUNTER_LIMIT;
- u32 ifx_creq;
-
- iowrite32(num, creq_addr);
- while (counter) {
- ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
- if (!ifx_creq)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
- pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
-}
-
-static void pch_can_set_int_enables(struct pch_can_priv *priv,
- enum pch_can_mode interrupt_no)
-{
- switch (interrupt_no) {
- case PCH_CAN_DISABLE:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
- break;
-
- case PCH_CAN_ALL:
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
- break;
-
- case PCH_CAN_NONE:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
- break;
-
- default:
- netdev_err(priv->ndev, "Invalid interrupt number.\n");
- break;
- }
-}
-
-static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
- int set, enum pch_ifreg dir)
-{
- u32 ie;
-
- if (dir)
- ie = PCH_IF_MCONT_TXIE;
- else
- ie = PCH_IF_MCONT_RXIE;
-
- /* Reading the Msg buffer from Message RAM to IF1/2 registers. */
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-
- /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
- &priv->regs->ifregs[dir].cmask);
-
- if (set) {
- /* Setting the MsgVal and RxIE/TxIE bits */
- pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
- pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
- } else {
- /* Clearing the MsgVal and RxIE/TxIE bits */
- pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
- pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
- }
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-}
-
-static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
-{
- int i;
-
- /* Traversing to obtain the object configured as receivers. */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
-}
-
-static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
-{
- int i;
-
- /* Traversing to obtain the object configured as transmit object. */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
-}
-
-static u32 pch_can_int_pending(struct pch_can_priv *priv)
-{
- return ioread32(&priv->regs->intr) & 0xffff;
-}
-
-static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
-{
- int i; /* Msg Obj ID (1~32) */
-
- for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
- iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
- iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
- iowrite32(0x0, &priv->regs->ifregs[0].id1);
- iowrite32(0x0, &priv->regs->ifregs[0].id2);
- iowrite32(0x0, &priv->regs->ifregs[0].mcont);
- iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
- PCH_CMASK_ARB | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- }
-}
-
-static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
-{
- int i;
-
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
-
- iowrite32(0x0, &priv->regs->ifregs[0].id1);
- iowrite32(0x0, &priv->regs->ifregs[0].id2);
-
- pch_can_bit_set(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_UMASK);
-
- /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
- if (i == PCH_RX_OBJ_END)
- pch_can_bit_set(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
- else
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
-
- iowrite32(0, &priv->regs->ifregs[0].mask1);
- pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
- 0x1fff | PCH_MASK2_MDIR_MXTD);
-
- /* Setting CMASK for writing */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
- PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- }
-
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
-
- /* Resetting DIR bit for reception */
- iowrite32(0x0, &priv->regs->ifregs[1].id1);
- iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);
-
- /* Setting EOB bit for transmitter */
- iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
- &priv->regs->ifregs[1].mcont);
-
- iowrite32(0, &priv->regs->ifregs[1].mask1);
- pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
-
- /* Setting CMASK for writing */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
- PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
- }
-}
-
-static void pch_can_init(struct pch_can_priv *priv)
-{
- /* Stopping the Can device. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Clearing all the message object buffers. */
- pch_can_clear_if_buffers(priv);
-
- /* Configuring the respective message object as either rx/tx object. */
- pch_can_config_rx_tx_buffers(priv);
-
- /* Enabling the interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_ALL);
-}
-
-static void pch_can_release(struct pch_can_priv *priv)
-{
- /* Stooping the CAN device. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Disabling the interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
-
- /* Disabling all the receive object. */
- pch_can_set_rx_all(priv, 0);
-
- /* Disabling all the transmit object. */
- pch_can_set_tx_all(priv, 0);
-}
-
-/* This function clears interrupt(s) from the CAN device. */
-static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
-{
- /* Clear interrupt for transmit object */
- if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
- /* Setting CMASK for clearing the reception interrupts. */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
- &priv->regs->ifregs[0].cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
- } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
- /*
- * Setting CMASK for clearing interrupts for frame transmission.
- */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
- &priv->regs->ifregs[1].cmask);
-
- /* Resetting the ID registers. */
- pch_can_bit_set(&priv->regs->ifregs[1].id2,
- PCH_ID2_DIR | (0x7ff << 2));
- iowrite32(0x0, &priv->regs->ifregs[1].id1);
-
- /* Clearing NewDat, TxRqst & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
- PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
- PCH_IF_MCONT_TXRQXT);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
- }
-}
-
-static void pch_can_reset(struct pch_can_priv *priv)
-{
- /* write to sw reset register */
- iowrite32(1, &priv->regs->srst);
- iowrite32(0, &priv->regs->srst);
-}
-
-static void pch_can_error(struct net_device *ndev, u32 status)
-{
- struct sk_buff *skb;
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct can_frame *cf;
- u32 errc, lec;
- struct net_device_stats *stats = &(priv->ndev->stats);
- enum can_state state = priv->can.state;
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return;
-
- errc = ioread32(&priv->regs->errc);
- if (status & PCH_BUS_OFF) {
- pch_can_set_tx_all(priv, 0);
- pch_can_set_rx_all(priv, 0);
- state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- priv->can.can_stats.bus_off++;
- can_bus_off(ndev);
- } else {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = errc & PCH_TEC;
- cf->data[7] = (errc & PCH_REC) >> 8;
- }
-
- /* Warning interrupt. */
- if (status & PCH_EWARN) {
- state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- cf->can_id |= CAN_ERR_CRTL;
- if (((errc & PCH_REC) >> 8) > 96)
- cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((errc & PCH_TEC) > 96)
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- netdev_dbg(ndev,
- "%s -> Error Counter is more than 96.\n", __func__);
- }
- /* Error passive interrupt. */
- if (status & PCH_EPASSIV) {
- priv->can.can_stats.error_passive++;
- state = CAN_STATE_ERROR_PASSIVE;
- cf->can_id |= CAN_ERR_CRTL;
- if (errc & PCH_RP)
- cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- if ((errc & PCH_TEC) > 127)
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
- netdev_dbg(ndev,
- "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
- }
-
- lec = status & PCH_LEC_ALL;
- switch (lec) {
- case PCH_STUF_ERR:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_FORM_ERR:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_ACK_ERR:
- cf->can_id |= CAN_ERR_ACK;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_BIT1_ERR:
- case PCH_BIT0_ERR:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_CRC_ERR:
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_LEC_ALL: /* Written by CPU. No error status */
- break;
- }
-
- priv->can.state = state;
- netif_receive_skb(skb);
-}
-
-static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
-{
- struct net_device *ndev = (struct net_device *)dev_id;
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- if (!pch_can_int_pending(priv))
- return IRQ_NONE;
-
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
-}
-
-static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
-{
- if (obj_id < PCH_FIFO_THRESH) {
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
- PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_INTPND);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
- } else if (obj_id > PCH_FIFO_THRESH) {
- pch_can_int_clr(priv, obj_id);
- } else if (obj_id == PCH_FIFO_THRESH) {
- int cnt;
- for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
- pch_can_int_clr(priv, cnt + 1);
- }
-}
-
-static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- struct sk_buff *skb;
- struct can_frame *cf;
-
- netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_MSGLOST);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_over_errors++;
- stats->rx_errors++;
-
- netif_receive_skb(skb);
-}
-
-static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
-{
- u32 reg;
- canid_t id;
- int rcv_pkts = 0;
- struct sk_buff *skb;
- struct can_frame *cf;
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- int i;
- u32 id2;
- u16 data_reg;
-
- do {
- /* Reading the message object from the Message RAM */
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
-
- /* Reading the MCONT register. */
- reg = ioread32(&priv->regs->ifregs[0].mcont);
-
- if (reg & PCH_IF_MCONT_EOB)
- break;
-
- /* If MsgLost bit set. */
- if (reg & PCH_IF_MCONT_MSGLOST) {
- pch_can_rx_msg_lost(ndev, obj_num);
- rcv_pkts++;
- quota--;
- obj_num++;
- continue;
- } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
- obj_num++;
- continue;
- }
-
- skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb) {
- netdev_err(ndev, "alloc_can_skb Failed\n");
- return rcv_pkts;
- }
-
- /* Get Received data */
- id2 = ioread32(&priv->regs->ifregs[0].id2);
- if (id2 & PCH_ID2_XTD) {
- id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
- id |= (((id2) & 0x1fff) << 16);
- cf->can_id = id | CAN_EFF_FLAG;
- } else {
- id = (id2 >> 2) & CAN_SFF_MASK;
- cf->can_id = id;
- }
-
- cf->len = can_cc_dlc2len((ioread32(&priv->regs->
- ifregs[0].mcont)) & 0xF);
-
- if (id2 & PCH_ID2_DIR) {
- cf->can_id |= CAN_RTR_FLAG;
- } else {
- for (i = 0; i < cf->len; i += 2) {
- data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
- cf->data[i] = data_reg;
- cf->data[i + 1] = data_reg >> 8;
- }
-
- stats->rx_bytes += cf->len;
- }
- stats->rx_packets++;
- rcv_pkts++;
- quota--;
- netif_receive_skb(skb);
-
- pch_fifo_thresh(priv, obj_num);
- obj_num++;
- } while (quota > 0);
-
- return rcv_pkts;
-}
-
-static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
-
- stats->tx_bytes += can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1,
- NULL);
- stats->tx_packets++;
- iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
- &priv->regs->ifregs[1].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
- if (int_stat == PCH_TX_OBJ_END)
- netif_wake_queue(ndev);
-}
-
-static int pch_can_poll(struct napi_struct *napi, int quota)
-{
- struct net_device *ndev = napi->dev;
- struct pch_can_priv *priv = netdev_priv(ndev);
- u32 int_stat;
- u32 reg_stat;
- int quota_save = quota;
-
- int_stat = pch_can_int_pending(priv);
- if (!int_stat)
- goto end;
-
- if (int_stat == PCH_STATUS_INT) {
- reg_stat = ioread32(&priv->regs->stat);
-
- if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
- ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
- pch_can_error(ndev, reg_stat);
- quota--;
- }
-
- if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
- pch_can_bit_clear(&priv->regs->stat,
- reg_stat & (PCH_TX_OK | PCH_RX_OK));
-
- int_stat = pch_can_int_pending(priv);
- }
-
- if (quota == 0)
- goto end;
-
- if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
- quota -= pch_can_rx_normal(ndev, int_stat, quota);
- } else if ((int_stat >= PCH_TX_OBJ_START) &&
- (int_stat <= PCH_TX_OBJ_END)) {
- /* Handle transmission interrupt */
- pch_can_tx_complete(ndev, int_stat);
- }
-
-end:
- napi_complete(napi);
- pch_can_set_int_enables(priv, PCH_CAN_ALL);
-
- return quota_save - quota;
-}
-
-static int pch_set_bittiming(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- const struct can_bittiming *bt = &priv->can.bittiming;
- u32 canbit;
- u32 bepe;
-
- /* Setting the CCE bit for accessing the Can Timing register. */
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
-
- canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
- canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
- canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
- canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
- bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
- iowrite32(canbit, &priv->regs->bitt);
- iowrite32(bepe, &priv->regs->brpe);
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
-
- return 0;
-}
-
-static void pch_can_start(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- if (priv->can.state != CAN_STATE_STOPPED)
- pch_can_reset(priv);
-
- pch_set_bittiming(ndev);
- pch_can_set_optmode(priv);
-
- pch_can_set_tx_all(priv, 1);
- pch_can_set_rx_all(priv, 1);
-
- /* Setting the CAN to run mode. */
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- return;
-}
-
-static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
-{
- int ret = 0;
-
- switch (mode) {
- case CAN_MODE_START:
- pch_can_start(ndev);
- netif_wake_queue(ndev);
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-static int pch_can_open(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- int retval;
-
- /* Registering the interrupt. */
- retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (retval) {
- netdev_err(ndev, "request_irq failed.\n");
- goto req_irq_err;
- }
-
- /* Open common can device */
- retval = open_candev(ndev);
- if (retval) {
- netdev_err(ndev, "open_candev() failed %d\n", retval);
- goto err_open_candev;
- }
-
- pch_can_init(priv);
- pch_can_start(ndev);
- napi_enable(&priv->napi);
- netif_start_queue(ndev);
-
- return 0;
-
-err_open_candev:
- free_irq(priv->dev->irq, ndev);
-req_irq_err:
- pch_can_release(priv);
-
- return retval;
-}
-
-static int pch_close(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- netif_stop_queue(ndev);
- napi_disable(&priv->napi);
- pch_can_release(priv);
- free_irq(priv->dev->irq, ndev);
- close_candev(ndev);
- priv->can.state = CAN_STATE_STOPPED;
- return 0;
-}
-
-static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct can_frame *cf = (struct can_frame *)skb->data;
- int tx_obj_no;
- int i;
- u32 id2;
-
- if (can_dev_dropped_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- tx_obj_no = priv->tx_obj;
- if (priv->tx_obj == PCH_TX_OBJ_END) {
- if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
- netif_stop_queue(ndev);
-
- priv->tx_obj = PCH_TX_OBJ_START;
- } else {
- priv->tx_obj++;
- }
-
- /* Setting the CMASK register. */
- pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
-
- /* If ID extended is set. */
- if (cf->can_id & CAN_EFF_FLAG) {
- iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
- id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
- } else {
- iowrite32(0, &priv->regs->ifregs[1].id1);
- id2 = (cf->can_id & CAN_SFF_MASK) << 2;
- }
-
- id2 |= PCH_ID_MSGVAL;
-
- /* If remote frame has to be transmitted.. */
- if (!(cf->can_id & CAN_RTR_FLAG))
- id2 |= PCH_ID2_DIR;
-
- iowrite32(id2, &priv->regs->ifregs[1].id2);
-
- /* Copy data to register */
- for (i = 0; i < cf->len; i += 2) {
- iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
- &priv->regs->ifregs[1].data[i / 2]);
- }
-
- can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);
-
- /* Set the size of the data. Update if2_mcont */
- iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
- PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);
-
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops pch_can_netdev_ops = {
- .ndo_open = pch_can_open,
- .ndo_stop = pch_close,
- .ndo_start_xmit = pch_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct ethtool_ops pch_can_ethtool_ops = {
- .get_ts_info = ethtool_op_get_ts_info,
-};
-
-static void pch_can_remove(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- unregister_candev(priv->ndev);
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pch_can_reset(priv);
- pci_iounmap(pdev, priv->regs);
- free_candev(priv->ndev);
-}
-
-static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
-{
- /* Clearing the IE, SIE and EIE bits of Can control register. */
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
-
- /* Appropriately setting them. */
- pch_can_bit_set(&priv->regs->cont,
- ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
-}
-
-/* This function retrieves interrupt enabled for the CAN device. */
-static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
-{
- /* Obtaining the status of IE, SIE and EIE interrupt bits. */
- return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
-}
-
-static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
- u32 buff_num, enum pch_ifreg dir)
-{
- u32 ie, enable;
-
- if (dir)
- ie = PCH_IF_MCONT_RXIE;
- else
- ie = PCH_IF_MCONT_TXIE;
-
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-
- if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
- ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
- enable = 1;
- else
- enable = 0;
-
- return enable;
-}
-
-static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, int set)
-{
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- if (set)
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
- else
- pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
-}
-
-static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num)
-{
- u32 link;
-
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
-
- if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
- link = 0;
- else
- link = 1;
- return link;
-}
-
-static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
-{
- return (ioread32(&priv->regs->treq1) & 0xffff) |
- (ioread32(&priv->regs->treq2) << 16);
-}
-
-static int __maybe_unused pch_can_suspend(struct device *dev_d)
-{
- int i;
- u32 buf_stat; /* Variable for reading the transmit buffer status. */
- int counter = PCH_COUNTER_LIMIT;
-
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct pch_can_priv *priv = netdev_priv(dev);
-
- /* Stop the CAN controller */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Indicate that we are aboutto/in suspend */
- priv->can.state = CAN_STATE_STOPPED;
-
- /* Waiting for all transmission to complete. */
- while (counter) {
- buf_stat = pch_can_get_buffer_status(priv);
- if (!buf_stat)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
- dev_err(dev_d, "%s -> Transmission time out.\n", __func__);
-
- /* Save interrupt configuration and then disable them */
- priv->int_enables = pch_can_get_int_enables(priv);
- pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
-
- /* Save Tx buffer enable state */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
- PCH_TX_IFREG);
-
- /* Disable all Transmit buffers */
- pch_can_set_tx_all(priv, 0);
-
- /* Save Rx buffer enable state */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
- PCH_RX_IFREG);
- priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
- }
-
- /* Disable all Receive buffers */
- pch_can_set_rx_all(priv, 0);
-
- return 0;
-}
-
-static int __maybe_unused pch_can_resume(struct device *dev_d)
-{
- int i;
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct pch_can_priv *priv = netdev_priv(dev);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- /* Disabling all interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
-
- /* Setting the CAN device in Stop Mode. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Configuring the transmit and receive buffers. */
- pch_can_config_rx_tx_buffers(priv);
-
- /* Restore the CAN state */
- pch_set_bittiming(dev);
-
- /* Listen/Active */
- pch_can_set_optmode(priv);
-
- /* Enabling the transmit buffer. */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);
-
- /* Configuring the receive buffer and enabling them. */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- /* Restore buffer link */
- pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);
-
- /* Restore buffer enables */
- pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
- }
-
- /* Enable CAN Interrupts */
- pch_can_set_int_custom(priv);
-
- /* Restore Run Mode */
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
-
- return 0;
-}
-
-static int pch_can_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
-{
- struct pch_can_priv *priv = netdev_priv(dev);
- u32 errc = ioread32(&priv->regs->errc);
-
- bec->txerr = errc & PCH_TEC;
- bec->rxerr = (errc & PCH_REC) >> 8;
-
- return 0;
-}
-
-static int pch_can_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct net_device *ndev;
- struct pch_can_priv *priv;
- int rc;
- void __iomem *addr;
-
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
- goto probe_exit_endev;
- }
-
- rc = pci_request_regions(pdev, KBUILD_MODNAME);
- if (rc) {
- dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
- goto probe_exit_pcireq;
- }
-
- addr = pci_iomap(pdev, 1, 0);
- if (!addr) {
- rc = -EIO;
- dev_err(&pdev->dev, "Failed pci_iomap\n");
- goto probe_exit_ipmap;
- }
-
- ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
- if (!ndev) {
- rc = -ENOMEM;
- dev_err(&pdev->dev, "Failed alloc_candev\n");
- goto probe_exit_alloc_candev;
- }
-
- priv = netdev_priv(ndev);
- priv->ndev = ndev;
- priv->regs = addr;
- priv->dev = pdev;
- priv->can.bittiming_const = &pch_can_bittiming_const;
- priv->can.do_set_mode = pch_can_do_set_mode;
- priv->can.do_get_berr_counter = pch_can_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_LOOPBACK;
- priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
-
- ndev->irq = pdev->irq;
- ndev->flags |= IFF_ECHO;
-
- pci_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->netdev_ops = &pch_can_netdev_ops;
- ndev->ethtool_ops = &pch_can_ethtool_ops;
- priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
-
- netif_napi_add_weight(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
-
- rc = pci_enable_msi(priv->dev);
- if (rc) {
- netdev_err(ndev, "PCH CAN opened without MSI\n");
- priv->use_msi = 0;
- } else {
- netdev_err(ndev, "PCH CAN opened with MSI\n");
- pci_set_master(pdev);
- priv->use_msi = 1;
- }
-
- rc = register_candev(ndev);
- if (rc) {
- dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
- goto probe_exit_reg_candev;
- }
-
- return 0;
-
-probe_exit_reg_candev:
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
- free_candev(ndev);
-probe_exit_alloc_candev:
- pci_iounmap(pdev, addr);
-probe_exit_ipmap:
- pci_release_regions(pdev);
-probe_exit_pcireq:
- pci_disable_device(pdev);
-probe_exit_endev:
- return rc;
-}
-
-static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
- pch_can_suspend,
- pch_can_resume);
-
-static struct pci_driver pch_can_pci_driver = {
- .name = "pch_can",
- .id_table = pch_pci_tbl,
- .probe = pch_can_probe,
- .remove = pch_can_remove,
- .driver.pm = &pch_can_pm_ops,
-};
-
-module_pci_driver(pch_can_pci_driver);
-
-MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.94");
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b306cf554634..0a59eab35da7 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1882,17 +1882,17 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->chip_id = chip_id;
gpriv->max_channels = max_channels;
- if (gpriv->chip_id == RENESAS_RZG2L) {
- gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
- if (IS_ERR(gpriv->rstc1))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
- "failed to get rstp_n\n");
-
- gpriv->rstc2 = devm_reset_control_get_exclusive(&pdev->dev, "rstc_n");
- if (IS_ERR(gpriv->rstc2))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
- "failed to get rstc_n\n");
- }
+ gpriv->rstc1 = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "rstp_n");
+ if (IS_ERR(gpriv->rstc1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
+ "failed to get rstp_n\n");
+
+ gpriv->rstc2 = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "rstc_n");
+ if (IS_ERR(gpriv->rstc2))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
+ "failed to get rstc_n\n");
/* Peripheral clock */
gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
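
The rcar_canfd hunk above switches to devm_reset_control_get_optional_exclusive(), which returns NULL (rather than an error) when the device tree simply has no such reset line, so the chip-specific if-guard can go away. A probe sketch using the same pattern, with hypothetical "foo" names:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int foo_probe(struct platform_device *pdev)
{
	struct reset_control *rstc;
	int ret;

	/* NULL if the node has no "foo_rst" reset; an ERR_PTR on real errors */
	rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, "foo_rst");
	if (IS_ERR(rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
				     "failed to get foo_rst\n");

	/* reset_control_deassert(NULL) is a no-op, so no special casing needed */
	ret = reset_control_deassert(rstc);
	if (ret)
		return ret;

	return 0;
}
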
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 1218f9642f33..8c6fea661530 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -38,10 +38,13 @@ config CAN_ETAS_ES58X
will be called etas_es58x.
config CAN_GS_USB
- tristate "Geschwister Schneider UG interfaces"
+ tristate "Geschwister Schneider UG and candleLight compatible interfaces"
help
- This driver supports the Geschwister Schneider and bytewerk.org
- candleLight USB CAN interfaces USB/CAN devices
+ This driver supports the Geschwister Schneider and
+ bytewerk.org candleLight compatible
+ (https://github.com/candle-usb/candleLight_fw) USB/CAN
+ interfaces.
+
If unsure choose N,
choose Y for built in support,
M to compile as module (module will be named: gs_usb).
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 9c2c25fde3d1..838744d2ce34 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -66,6 +66,7 @@ enum gs_usb_breq {
GS_USB_BREQ_BT_CONST_EXT,
GS_USB_BREQ_SET_TERMINATION,
GS_USB_BREQ_GET_TERMINATION,
+ GS_USB_BREQ_GET_STATE,
};
enum gs_can_mode {
@@ -134,6 +135,8 @@ struct gs_device_config {
/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
/* GS_CAN_FEATURE_TERMINATION BIT(11) */
+#define GS_CAN_MODE_BERR_REPORTING BIT(12)
+/* GS_CAN_FEATURE_GET_STATE BIT(13) */
struct gs_device_mode {
__le32 mode;
@@ -174,7 +177,9 @@ struct gs_device_termination_state {
#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
#define GS_CAN_FEATURE_TERMINATION BIT(11)
-#define GS_CAN_FEATURE_MASK GENMASK(11, 0)
+#define GS_CAN_FEATURE_BERR_REPORTING BIT(12)
+#define GS_CAN_FEATURE_GET_STATE BIT(13)
+#define GS_CAN_FEATURE_MASK GENMASK(13, 0)
/* internal quirks - keep in GS_CAN_FEATURE space for now */
@@ -843,8 +848,6 @@ static int gs_can_open(struct net_device *netdev)
ctrlmode = dev->can.ctrlmode;
if (ctrlmode & CAN_CTRLMODE_FD) {
- flags |= GS_CAN_MODE_FD;
-
if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
else
@@ -911,25 +914,29 @@ static int gs_can_open(struct net_device *netdev)
/* flags */
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
- else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+
+ if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
flags |= GS_CAN_MODE_LISTEN_ONLY;
- /* Controller is not allowed to retry TX
- * this mode is unavailable on atmels uc3c hardware
- */
+ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
flags |= GS_CAN_MODE_ONE_SHOT;
- if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ if (ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ flags |= GS_CAN_MODE_BERR_REPORTING;
+
+ if (ctrlmode & CAN_CTRLMODE_FD)
+ flags |= GS_CAN_MODE_FD;
/* if hardware supports timestamps, enable it */
- if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) {
flags |= GS_CAN_MODE_HW_TIMESTAMP;
- /* start polling timestamp */
- if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* start polling timestamp */
gs_usb_timestamp_init(dev);
+ }
/* finally start device */
dev->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -954,6 +961,42 @@ static int gs_can_open(struct net_device *netdev)
return 0;
}
+static int gs_usb_get_state(const struct net_device *netdev,
+ struct can_berr_counter *bec,
+ enum can_state *state)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_state ds;
+ int rc;
+
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_GET_STATE,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &ds, sizeof(ds),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (le32_to_cpu(ds.state) >= CAN_STATE_MAX)
+ return -EOPNOTSUPP;
+
+ *state = le32_to_cpu(ds.state);
+ bec->txerr = le32_to_cpu(ds.txerr);
+ bec->rxerr = le32_to_cpu(ds.rxerr);
+
+ return 0;
+}
+
+static int gs_usb_can_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ enum can_state state;
+
+ return gs_usb_get_state(netdev, bec, &state);
+}
+
static int gs_can_close(struct net_device *netdev)
{
int rc;
@@ -1153,6 +1196,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->ethtool_ops = &gs_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+ netdev->dev_id = channel;
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
@@ -1224,6 +1268,12 @@ static struct gs_can *gs_make_candev(unsigned int channel,
}
}
+ if (feature & GS_CAN_FEATURE_BERR_REPORTING)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
+
+ if (feature & GS_CAN_FEATURE_GET_STATE)
+ dev->can.do_get_berr_counter = gs_usb_can_get_berr_counter;
+
/* The CANtact Pro from LinkLayer Labs is based on the
* LPC54616 µC, which is affected by the NXP LPC USB transfer
* erratum. However, the current firmware (version 2) doesn't
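
The new gs_usb_get_state() above relies on usb_control_msg_recv(), which wraps usb_control_msg() with an internal DMA-safe bounce buffer and treats short transfers as errors, so the response struct can live on the stack and no length check is needed. A generic sketch of the same request pattern (all "foo" names and the request number are invented):

#include <linux/types.h>
#include <linux/usb.h>

#define FOO_BREQ_GET_STATUS	42	/* made-up vendor request number */

struct foo_status {
	__le32 state;
	__le32 rxerr;
	__le32 txerr;
} __packed;

static int foo_get_status(struct usb_device *udev, u16 channel,
			  struct foo_status *st)
{
	/* returns 0 only if the full sizeof(*st) was transferred,
	 * otherwise a negative errno
	 */
	return usb_control_msg_recv(udev, 0, FOO_BREQ_GET_STATUS,
				    USB_DIR_IN | USB_TYPE_VENDOR |
				    USB_RECIP_INTERFACE,
				    channel, 0, st, sizeof(*st),
				    USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
}
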
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
index b20d951a0790..cf260044f0b9 100644
--- a/drivers/net/can/usb/kvaser_usb/Makefile
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -1,8 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_kvaser_usb_hydra.o += -Wno-array-bounds
-endif
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index f6c0938027ec..ff10b3790d84 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context {
u32 echo_index;
};
+struct kvaser_usb_busparams {
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 nsamples;
+} __packed;
+
struct kvaser_usb {
struct usb_device *udev;
struct usb_interface *intf;
@@ -104,13 +112,19 @@ struct kvaser_usb_net_priv {
struct can_priv can;
struct can_berr_counter bec;
+ /* subdriver-specific data */
+ void *sub_priv;
+
struct kvaser_usb *dev;
struct net_device *netdev;
int channel;
- struct completion start_comp, stop_comp, flush_comp;
+ struct completion start_comp, stop_comp, flush_comp,
+ get_busparams_comp;
struct usb_anchor tx_submitted;
+ struct kvaser_usb_busparams busparams_nominal, busparams_data;
+
spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
int active_tx_contexts;
struct kvaser_usb_tx_urb_context tx_contexts[];
@@ -120,11 +134,15 @@ struct kvaser_usb_net_priv {
* struct kvaser_usb_dev_ops - Device specific functions
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
+ * @dev_get_busparams: readback arbitration busparams
* @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
* @dev_setup_endpoints: setup USB in and out endpoints
* @dev_init_card: initialize card
+ * @dev_init_channel: initialize channel
+ * @dev_remove_channel: uninitialize channel
* @dev_get_software_info: get software info
* @dev_get_software_details: get software details
* @dev_get_card_info: get card info
@@ -140,12 +158,18 @@ struct kvaser_usb_net_priv {
*/
struct kvaser_usb_dev_ops {
int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
- int (*dev_set_bittiming)(struct net_device *netdev);
- int (*dev_set_data_bittiming)(struct net_device *netdev);
+ int (*dev_set_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv);
+ int (*dev_set_data_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv);
int (*dev_get_berr_counter)(const struct net_device *netdev,
struct can_berr_counter *bec);
int (*dev_setup_endpoints)(struct kvaser_usb *dev);
int (*dev_init_card)(struct kvaser_usb *dev);
+ int (*dev_init_channel)(struct kvaser_usb_net_priv *priv);
+ void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv);
int (*dev_get_software_info)(struct kvaser_usb *dev);
int (*dev_get_software_details)(struct kvaser_usb *dev);
int (*dev_get_card_info)(struct kvaser_usb *dev);
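
With this change the bittiming ops exchange a packed, little-endian struct kvaser_usb_busparams instead of programming the timing registers directly; the core (see kvaser_usb_core.c below) fills it from the generic CAN bittiming, sends it, then reads it back to verify. A minimal sketch of that translation, assuming the struct defined above ("foo_fill_busparams" is an illustrative name, not a driver function):

#include <linux/can/dev.h>

static void foo_fill_busparams(const struct can_bittiming *bt,
			       bool triple_sampling,
			       struct kvaser_usb_busparams *bp)
{
	bp->bitrate = cpu_to_le32(bt->bitrate);
	bp->tseg1 = bt->prop_seg + bt->phase_seg1;	/* fits in u8 per the bittiming const */
	bp->tseg2 = bt->phase_seg2;
	bp->sjw = bt->sjw;
	bp->nsamples = triple_sampling ? 3 : 1;
}
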
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 802e27c0eced..3a2bfaad1406 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -440,10 +440,6 @@ static int kvaser_usb_open(struct net_device *netdev)
if (err)
return err;
- err = kvaser_usb_setup_rx_urbs(dev);
- if (err)
- goto error;
-
err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
@@ -534,6 +530,93 @@ static int kvaser_usb_close(struct net_device *netdev)
return 0;
}
+static int kvaser_usb_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = bt->prop_seg + bt->phase_seg1;
+ int tseg2 = bt->phase_seg2;
+ int sjw = bt->sjw;
+ int err = -EOPNOTSUPP;
+
+ busparams.bitrate = cpu_to_le32(bt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ busparams.nsamples = 3;
+ else
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_busparams(priv);
+ if (err) {
+ /* Treat EOPNOTSUPP as success */
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+ }
+
+ if (memcmp(&busparams, &priv->busparams_nominal,
+ sizeof(priv->busparams_nominal)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
+static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+ int tseg2 = dbt->phase_seg2;
+ int sjw = dbt->sjw;
+ int err;
+
+ if (!ops->dev_set_data_bittiming ||
+ !ops->dev_get_data_busparams)
+ return -EOPNOTSUPP;
+
+ busparams.bitrate = cpu_to_le32(dbt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_data_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_data_busparams(priv);
+ if (err)
+ return err;
+
+ if (memcmp(&busparams, &priv->busparams_data,
+ sizeof(priv->busparams_data)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
static void kvaser_usb_write_bulk_callback(struct urb *urb)
{
struct kvaser_usb_tx_urb_context *context = urb->context;
@@ -684,6 +767,7 @@ static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int i;
for (i = 0; i < dev->nchannels; i++) {
@@ -699,6 +783,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
if (!dev->nets[i])
continue;
+ if (ops->dev_remove_channel)
+ ops->dev_remove_channel(dev->nets[i]);
+
free_candev(dev->nets[i]->netdev);
}
}
@@ -730,6 +817,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
init_completion(&priv->flush_comp);
+ init_completion(&priv->get_busparams_comp);
priv->can.ctrlmode_supported = 0;
priv->dev = dev;
@@ -742,7 +830,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = ops->dev_set_bittiming;
+ priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
priv->can.do_set_mode = ops->dev_set_mode;
if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
@@ -754,7 +842,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
+ priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
@@ -772,17 +860,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
dev->nets[channel] = priv;
+ if (ops->dev_init_channel) {
+ err = ops->dev_init_channel(priv);
+ if (err)
+ goto err;
+ }
+
err = register_candev(netdev);
if (err) {
dev_err(&dev->intf->dev, "Failed to register CAN device\n");
- free_candev(netdev);
- dev->nets[channel] = NULL;
- return err;
+ goto err;
}
netdev_dbg(netdev, "device registered\n");
return 0;
+
+err:
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
}
static int kvaser_usb_probe(struct usb_interface *intf,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 66f672ea631b..f688124d6d66 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -45,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
/* Minihydra command IDs */
#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_BUSPARAMS_REQ 17
+#define CMD_GET_BUSPARAMS_RESP 18
#define CMD_GET_CHIP_STATE_REQ 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_DRIVERMODE_REQ 21
@@ -196,21 +198,26 @@ struct kvaser_cmd_chip_state_event {
#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
struct kvaser_cmd_set_busparams {
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 nsamples;
+ struct kvaser_usb_busparams busparams_nominal;
u8 reserved0[4];
- __le32 bitrate_d;
- u8 tseg1_d;
- u8 tseg2_d;
- u8 sjw_d;
- u8 nsamples_d;
+ struct kvaser_usb_busparams busparams_data;
u8 canfd_mode;
u8 reserved1[7];
} __packed;
+/* Busparam type */
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01
+struct kvaser_cmd_get_busparams_req {
+ u8 type;
+ u8 reserved[27];
+} __packed;
+
+struct kvaser_cmd_get_busparams_res {
+ struct kvaser_usb_busparams busparams;
+ u8 reserved[20];
+} __packed;
+
/* Ctrl modes */
#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
@@ -281,6 +288,8 @@ struct kvaser_cmd {
struct kvaser_cmd_error_event error_event;
struct kvaser_cmd_set_busparams set_busparams_req;
+ struct kvaser_cmd_get_busparams_req get_busparams_req;
+ struct kvaser_cmd_get_busparams_res get_busparams_res;
struct kvaser_cmd_chip_state_event chip_state_event;
@@ -363,6 +372,10 @@ struct kvaser_cmd_ext {
} __packed;
} __packed;
+struct kvaser_usb_net_hydra_priv {
+ int pending_get_busparams_type;
+};
+
static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.name = "kvaser_usb_kcan",
.tseg1_min = 1,
@@ -840,6 +853,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
complete(&priv->flush_comp);
}
+static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ hydra = priv->sub_priv;
+ if (!hydra)
+ return;
+
+ switch (hydra->pending_get_busparams_type) {
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN:
+ memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams,
+ sizeof(priv->busparams_nominal));
+ break;
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD:
+ memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams,
+ sizeof(priv->busparams_nominal));
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n",
+ hydra->pending_get_busparams_type);
+ break;
+ }
+ hydra->pending_get_busparams_type = -1;
+
+ complete(&priv->get_busparams_comp);
+}
+
static void
kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
u8 bus_status,
@@ -1326,6 +1372,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
kvaser_usb_hydra_state_event(dev, cmd);
break;
+ case CMD_GET_BUSPARAMS_RESP:
+ kvaser_usb_hydra_get_busparams_reply(dev, cmd);
+ break;
+
case CMD_ERROR_EVENT:
kvaser_usb_hydra_error_event(dev, cmd);
break;
@@ -1522,15 +1572,58 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
return err;
}
-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ int busparams_type)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
+ struct kvaser_cmd *cmd;
+ int err;
+
+ if (!hydra)
+ return -EINVAL;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+ cmd->get_busparams_req.type = busparams_type;
+ hydra->pending_get_busparams_type = busparams_type;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN);
+}
+
+static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD);
+}
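
The busparams request/reply handshake above follows the usual completion pattern: the requester re-arms the completion before sending the command and then blocks in wait_for_completion_timeout(), while kvaser_usb_hydra_get_busparams_reply() (earlier in this patch) copies the result and calls complete() from the URB completion path. A stripped-down kernel-style sketch of that pairing, with placeholder names:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct demo_ctx {
	struct completion done;	/* assumed set up once with init_completion() */
	int result;		/* filled in by the reply handler */
};

/* Runs in the asynchronous reply path (e.g. a URB callback). */
static void demo_reply(struct demo_ctx *ctx, int value)
{
	ctx->result = value;
	complete(&ctx->done);
}

/* Runs in process context; returns -ETIMEDOUT if the device never answers. */
static int demo_request(struct demo_ctx *ctx, unsigned int timeout_ms)
{
	reinit_completion(&ctx->done);
	/* ...queue the request command to the device here... */
	if (!wait_for_completion_timeout(&ctx->done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;
	return ctx->result;
}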
+
+static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = bt->prop_seg + bt->phase_seg1;
- int tseg2 = bt->phase_seg2;
- int sjw = bt->sjw;
int err;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1538,11 +1631,8 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
- cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
- cmd->set_busparams_req.sjw = (u8)sjw;
- cmd->set_busparams_req.tseg1 = (u8)tseg1;
- cmd->set_busparams_req.tseg2 = (u8)tseg2;
- cmd->set_busparams_req.nsamples = 1;
+ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
+ sizeof(cmd->set_busparams_req.busparams_nominal));
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
@@ -1556,15 +1646,12 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
return err;
}
-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = dbt->prop_seg + dbt->phase_seg1;
- int tseg2 = dbt->phase_seg2;
- int sjw = dbt->sjw;
int err;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1572,11 +1659,8 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
- cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
- cmd->set_busparams_req.sjw_d = (u8)sjw;
- cmd->set_busparams_req.tseg1_d = (u8)tseg1;
- cmd->set_busparams_req.tseg2_d = (u8)tseg2;
- cmd->set_busparams_req.nsamples_d = 1;
+ memcpy(&cmd->set_busparams_req.busparams_data, busparams,
+ sizeof(cmd->set_busparams_req.busparams_data));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1683,6 +1767,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL);
+ if (!hydra)
+ return -ENOMEM;
+
+ priv->sub_priv = hydra;
+
+ return 0;
+}
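
Per-channel sub-private data is allocated with devm_kzalloc() against the USB interface's struct device, so it is released automatically when the interface is unbound; that is why the hydra backend needs no dev_remove_channel() hook, while the leaf backend (later in this patch) adds one only to cancel its delayed work. A kernel-style sketch of the same allocation idea, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_chan_priv {
	int pending_type;
};

/* Lifetime of the allocation is tied to @parent; no matching kfree() needed. */
static int demo_init_channel(struct device *parent, void **sub_priv)
{
	struct demo_chan_priv *p;

	p = devm_kzalloc(parent, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->pending_type = -1;	/* nothing outstanding yet */
	*sub_priv = p;
	return 0;
}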
+
static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@@ -2027,10 +2124,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
.dev_set_mode = kvaser_usb_hydra_set_mode,
.dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
+ .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams,
.dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
+ .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams,
.dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
.dev_init_card = kvaser_usb_hydra_init_card,
+ .dev_init_channel = kvaser_usb_hydra_init_channel,
.dev_get_software_info = kvaser_usb_hydra_get_software_info,
.dev_get_software_details = kvaser_usb_hydra_get_software_details,
.dev_get_card_info = kvaser_usb_hydra_get_card_info,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 19958037720f..1c2f99ce4c6c 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -56,6 +57,9 @@
#define CMD_RX_EXT_MESSAGE 14
#define CMD_TX_EXT_MESSAGE 15
#define CMD_SET_BUS_PARAMS 16
+#define CMD_GET_BUS_PARAMS 17
+#define CMD_GET_BUS_PARAMS_REPLY 18
+#define CMD_GET_CHIP_STATE 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_CTRL_MODE 21
#define CMD_RESET_CHIP 24
@@ -70,10 +74,13 @@
#define CMD_GET_CARD_INFO_REPLY 35
#define CMD_GET_SOFTWARE_INFO 38
#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_ERROR_EVENT 45
#define CMD_FLUSH_QUEUE 48
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
#define CMD_FLUSH_QUEUE_REPLY 68
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
#define CMD_LEAF_LOG_MESSAGE 106
@@ -83,6 +90,8 @@
#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12)
+
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@@ -157,11 +166,7 @@ struct usbcan_cmd_softinfo {
struct kvaser_cmd_busparams {
u8 tid;
u8 channel;
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 no_samp;
+ struct kvaser_usb_busparams busparams;
} __packed;
struct kvaser_cmd_tx_can {
@@ -230,7 +235,7 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
-struct leaf_cmd_error_event {
+struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
__le16 time[3];
@@ -242,7 +247,7 @@ struct leaf_cmd_error_event {
u8 error_factor;
} __packed;
-struct usbcan_cmd_error_event {
+struct usbcan_cmd_can_error_event {
u8 tid;
u8 padding;
u8 tx_errors_count_ch0;
@@ -254,6 +259,28 @@ struct usbcan_cmd_error_event {
__le16 time;
} __packed;
+/* CMD_ERROR_EVENT error codes */
+#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8
+#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9
+
+struct leaf_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 timestamp[3];
+ __le16 padding;
+ __le16 info1;
+ __le16 info2;
+} __packed;
+
+struct usbcan_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 info1;
+ __le16 info2;
+ __le16 timestamp;
+ __le16 padding;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -278,6 +305,28 @@ struct leaf_cmd_log_message {
u8 data[8];
} __packed;
+/* Sub commands for cap_req and cap_res */
+#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02
+#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05
+struct kvaser_cmd_cap_req {
+ __le16 padding0;
+ __le16 cap_cmd;
+ __le16 padding1;
+ __le16 channel;
+} __packed;
+
+/* Status codes for cap_res */
+#define KVASER_USB_LEAF_CAP_STAT_OK 0x00
+#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01
+#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02
+struct kvaser_cmd_cap_res {
+ __le16 padding;
+ __le16 cap_cmd;
+ __le16 status;
+ __le32 mask;
+ __le32 value;
+} __packed;
+
struct kvaser_cmd {
u8 len;
u8 id;
@@ -293,14 +342,18 @@ struct kvaser_cmd {
struct leaf_cmd_softinfo softinfo;
struct leaf_cmd_rx_can rx_can;
struct leaf_cmd_chip_state_event chip_state_event;
- struct leaf_cmd_error_event error_event;
+ struct leaf_cmd_can_error_event can_error_event;
struct leaf_cmd_log_message log_message;
+ struct leaf_cmd_error_event error_event;
+ struct kvaser_cmd_cap_req cap_req;
+ struct kvaser_cmd_cap_res cap_res;
} __packed leaf;
union {
struct usbcan_cmd_softinfo softinfo;
struct usbcan_cmd_rx_can rx_can;
struct usbcan_cmd_chip_state_event chip_state_event;
+ struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
} __packed usbcan;
@@ -323,7 +376,10 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
[CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event),
+ [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res),
+ [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
/* ignored events: */
[CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
};
@@ -337,7 +393,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
/* ignored events: */
[CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
};
@@ -365,6 +422,15 @@ struct kvaser_usb_err_summary {
};
};
+struct kvaser_usb_net_leaf_priv {
+ struct kvaser_usb_net_priv *net;
+
+ struct delayed_work chip_state_req_work;
+
+ /* started but not reported as bus-on yet */
+ bool joining_bus;
+};
+
static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
.name = "kvaser_usb_ucii",
.tseg1_min = 4,
@@ -606,6 +672,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+ if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
+ dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
+
if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
/* Firmware expects bittiming parameters calculated for 16MHz
* clock, regardless of the actual clock
@@ -693,6 +762,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
+ u16 cap_cmd_req, u16 *status)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+ int err;
+ int i;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_GET_CAPABILITIES_REQ;
+ cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
+
+ err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ if (err)
+ goto end;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+ if (err)
+ goto end;
+
+ *status = le16_to_cpu(cmd->u.leaf.cap_res.status);
+
+ if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
+ goto end;
+
+ cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ value = le32_to_cpu(cmd->u.leaf.cap_res.value);
+ mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+ cap_cmd_res);
+ break;
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (BIT(i) & (value & mask)) {
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_LISTENONLY;
+ break;
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ card_data->capabilities |=
+ KVASER_USB_CAP_BERR_CAP;
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(cmd);
+
+ return err;
+}
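
Each capability response carries a mask of channels for which the queried capability is implemented and a value of channels on which it is granted; only bits set in both enable the feature, which is what the BIT(i) & (value & mask) loop above checks. For example, value = 0x1 with mask = 0x3 on a two-channel device enables it for channel 0 only. A small stand-alone sketch of the same decoding (values hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x3;	/* capability implemented on channels 0 and 1 */
	uint32_t value = 0x1;	/* ...but currently granted on channel 0 only */
	int nchannels = 2;
	int i;

	for (i = 0; i < nchannels; i++)
		if ((value & mask) & (1u << i))
			printf("channel %d: capability available\n", i);
	return 0;
}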
+
+static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
+{
+ int err;
+ u16 status;
+
+ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+ dev_info(&dev->intf->dev,
+ "No extended capability support. Upgrade device firmware.\n");
+ return 0;
+ }
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
+ status);
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
+ status);
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
+{
+ int err = 0;
+
+ if (dev->driver_info->family == KVASER_LEAF)
+ err = kvaser_usb_leaf_get_capabilities_leaf(dev);
+
+ return err;
+}
+
static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -721,7 +900,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
- if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
struct sk_buff *skb;
struct can_frame *cf;
@@ -774,11 +953,22 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
return err;
}
+static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
+{
+ struct kvaser_usb_net_leaf_priv *leaf =
+ container_of(work, struct kvaser_usb_net_leaf_priv,
+ chip_state_req_work.work);
+ struct kvaser_usb_net_priv *priv = leaf->net;
+
+ kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
+}
+
static void
kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_err_summary *es,
struct can_frame *cf)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
struct kvaser_usb *dev = priv->dev;
struct net_device_stats *stats = &priv->netdev->stats;
enum can_state cur_state, new_state, tx_state, rx_state;
@@ -792,20 +982,32 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
new_state = CAN_STATE_BUS_OFF;
} else if (es->status & M16C_STATE_BUS_PASSIVE) {
new_state = CAN_STATE_ERROR_PASSIVE;
- } else if (es->status & M16C_STATE_BUS_ERROR) {
+ } else if ((es->status & M16C_STATE_BUS_ERROR) &&
+ cur_state >= CAN_STATE_BUS_OFF) {
/* Guard against spurious error events after a busoff */
- if (cur_state < CAN_STATE_BUS_OFF) {
- if (es->txerr >= 128 || es->rxerr >= 128)
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (es->txerr >= 96 || es->rxerr >= 96)
- new_state = CAN_STATE_ERROR_WARNING;
- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
+ } else if (es->txerr >= 128 || es->rxerr >= 128) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (es->txerr >= 96 || es->rxerr >= 96) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
}
- if (!es->status)
- new_state = CAN_STATE_ERROR_ACTIVE;
+ /* 0bfd:0124 FW 4.18.778 was observed to send the initial
+ * CMD_CHIP_STATE_EVENT after CMD_START_CHIP with M16C_STATE_BUS_OFF
+ * bit set if the channel was bus-off when it was last stopped (even
+ * across chip resets). This bit will clear shortly afterwards, without
+ * triggering a second unsolicited chip state event.
+ * Ignore this initial bus-off.
+ */
+ if (leaf->joining_bus) {
+ if (new_state == CAN_STATE_BUS_OFF) {
+ netdev_dbg(priv->netdev, "ignoring bus-off during startup");
+ new_state = cur_state;
+ } else {
+ leaf->joining_bus = false;
+ }
+ }
if (new_state != cur_state) {
tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
@@ -815,7 +1017,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
}
if (priv->can.restart_ms &&
- cur_state >= CAN_STATE_BUS_OFF &&
+ cur_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
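
The rewritten state logic keeps the standard CAN error-counter thresholds: either counter at 96 or above means error-warning, 128 or above means error-passive, and bus-off is derived from the status flags rather than the counters. A tiny stand-alone illustration of that mapping:

#include <stdio.h>

enum demo_state { DEMO_ERROR_ACTIVE, DEMO_ERROR_WARNING, DEMO_ERROR_PASSIVE };

static enum demo_state counters_to_state(int txerr, int rxerr)
{
	if (txerr >= 128 || rxerr >= 128)
		return DEMO_ERROR_PASSIVE;
	if (txerr >= 96 || rxerr >= 96)
		return DEMO_ERROR_WARNING;
	return DEMO_ERROR_ACTIVE;
}

int main(void)
{
	printf("%d\n", counters_to_state(97, 0));	/* 1: warning */
	printf("%d\n", counters_to_state(130, 10));	/* 2: passive */
	return 0;
}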
@@ -849,6 +1051,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
struct sk_buff *skb;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_leaf_priv *leaf;
enum can_state old_state, new_state;
if (es->channel >= dev->nchannels) {
@@ -858,8 +1061,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
priv = dev->nets[es->channel];
+ leaf = priv->sub_priv;
stats = &priv->netdev->stats;
+ /* Ignore e.g. state change to bus-off reported just after stopping */
+ if (!netif_running(priv->netdev))
+ return;
+
/* Update all of the CAN interface's state and error counters before
* trying any memory allocation that can actually fail with -ENOMEM.
*
@@ -874,6 +1082,17 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
new_state = priv->can.state;
+ /* If there are errors, request status updates periodically as we do
+ * not get automatic notifications of improved state.
+ * Also request updates if we saw a stale BUS_OFF during startup
+ * (joining_bus).
+ */
+ if (new_state < CAN_STATE_BUS_OFF &&
+ (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE ||
+ leaf->joining_bus))
+ schedule_delayed_work(&leaf->chip_state_req_work,
+ msecs_to_jiffies(500));
+
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
@@ -891,7 +1110,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
+ old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
@@ -990,11 +1209,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
case CMD_CAN_ERROR_EVENT:
es.channel = 0;
- es.status = cmd->u.usbcan.error_event.status_ch0;
- es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
- es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+ es.status = cmd->u.usbcan.can_error_event.status_ch0;
+ es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
+ es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch1;
+ cmd->u.usbcan.can_error_event.status_ch1;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
/* The USBCAN firmware supports up to 2 channels.
@@ -1002,13 +1221,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
*/
if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
es.channel = 1;
- es.status = cmd->u.usbcan.error_event.status_ch1;
+ es.status = cmd->u.usbcan.can_error_event.status_ch1;
es.txerr =
- cmd->u.usbcan.error_event.tx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
es.rxerr =
- cmd->u.usbcan.error_event.rx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch0;
+ cmd->u.usbcan.can_error_event.status_ch0;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
}
break;
@@ -1025,11 +1244,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
switch (cmd->id) {
case CMD_CAN_ERROR_EVENT:
- es.channel = cmd->u.leaf.error_event.channel;
- es.status = cmd->u.leaf.error_event.status;
- es.txerr = cmd->u.leaf.error_event.tx_errors_count;
- es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
- es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+ es.channel = cmd->u.leaf.can_error_event.channel;
+ es.status = cmd->u.leaf.can_error_event.status;
+ es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
+ es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
break;
case CMD_LEAF_LOG_MESSAGE:
es.channel = cmd->u.leaf.log_message.channel;
@@ -1162,6 +1381,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
netif_rx(skb);
}
+static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u16 info1 = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
+ break;
+ case KVASER_USBCAN:
+ info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
+ break;
+ }
+
+ /* info1 will contain the offending cmd_no */
+ switch (info1) {
+ case CMD_SET_CTRL_MODE:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_CTRL_MODE error in parameter\n");
+ break;
+
+ case CMD_SET_BUS_PARAMS:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_BUS_PARAMS error in parameter\n");
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled parameter error event cmd_no (%u)\n",
+ info1);
+ break;
+ }
+}
+
+static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u8 error_code = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ error_code = cmd->u.leaf.error_event.error_code;
+ break;
+ case KVASER_USBCAN:
+ error_code = cmd->u.usbcan.error_event.error_code;
+ break;
+ }
+
+ switch (error_code) {
+ case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
+ /* Received additional CAN message, when firmware TX queue is
+ * already full. Something is wrong with the driver.
+ * This should never happen!
+ */
+ dev_err(&dev->intf->dev,
+ "Received error event TX_QUEUE_FULL\n");
+ break;
+ case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
+ kvaser_usb_leaf_error_event_parameter(dev, cmd);
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled error event (%d)\n", error_code);
+ break;
+ }
+}
+
static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -1202,6 +1489,25 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
complete(&priv->stop_comp);
}
+static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.busparams.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
+ sizeof(priv->busparams_nominal));
+
+ complete(&priv->get_busparams_comp);
+}
+
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -1240,6 +1546,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_tx_acknowledge(dev, cmd);
break;
+ case CMD_ERROR_EVENT:
+ kvaser_usb_leaf_error_event(dev, cmd);
+ break;
+
+ case CMD_GET_BUS_PARAMS_REPLY:
+ kvaser_usb_leaf_get_busparams_reply(dev, cmd);
+ break;
+
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
if (dev->driver_info->family != KVASER_USBCAN)
@@ -1318,8 +1632,11 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
+ leaf->joining_bus = true;
+
reinit_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
@@ -1336,10 +1653,13 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
reinit_completion(&priv->stop_comp);
+ cancel_delayed_work(&leaf->chip_state_req_work);
+
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);
if (err)
@@ -1386,10 +1706,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
return 0;
}
-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf;
+
+ leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
+ if (!leaf)
+ return -ENOMEM;
+
+ leaf->net = priv;
+ INIT_DELAYED_WORK(&leaf->chip_state_req_work,
+ kvaser_usb_leaf_chip_state_req_work);
+
+ priv->sub_priv = leaf;
+
+ return 0;
+}
+
+static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+
+ if (leaf)
+ cancel_delayed_work_sync(&leaf->chip_state_req_work);
+}
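
Leaf firmware does not report improved bus state on its own, so the patch polls CMD_GET_CHIP_STATE from a delayed work item: it is initialised in dev_init_channel(), re-armed from the error path roughly every 500 ms while errors persist, cancelled (non-blocking) when the chip is stopped, and cancelled synchronously when the channel is removed. A compact kernel-style sketch of that lifecycle with placeholder names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct delayed_work poll_work;
};

static void demo_poll_fn(struct work_struct *work)
{
	struct demo_priv *p = container_of(work, struct demo_priv,
					   poll_work.work);

	/* ...query the device state here; the handler may re-arm us... */
	(void)p;
}

static void demo_channel_init(struct demo_priv *p)
{
	INIT_DELAYED_WORK(&p->poll_work, demo_poll_fn);
}

static void demo_error_seen(struct demo_priv *p)
{
	schedule_delayed_work(&p->poll_work, msecs_to_jiffies(500));
}

static void demo_channel_remove(struct demo_priv *p)
{
	cancel_delayed_work_sync(&p->poll_work);	/* wait if it is running */
}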
+
+static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
int rc;
@@ -1402,15 +1747,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
cmd->u.busparams.channel = priv->channel;
cmd->u.busparams.tid = 0xff;
- cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
- cmd->u.busparams.sjw = bt->sjw;
- cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
- cmd->u.busparams.tseg2 = bt->phase_seg2;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- cmd->u.busparams.no_samp = 3;
- else
- cmd->u.busparams.no_samp = 1;
+ memcpy(&cmd->u.busparams.busparams, busparams,
+ sizeof(cmd->u.busparams.busparams));
rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
@@ -1418,16 +1756,40 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
return rc;
}
+static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ if (priv->dev->driver_info->family == KVASER_USBCAN)
+ return -EOPNOTSUPP;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
enum can_mode mode)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
switch (mode) {
case CAN_MODE_START:
kvaser_usb_unlink_tx_urbs(priv);
+ leaf->joining_bus = true;
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
@@ -1479,14 +1841,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_set_mode = kvaser_usb_leaf_set_mode,
.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+ .dev_get_busparams = kvaser_usb_leaf_get_busparams,
.dev_set_data_bittiming = NULL,
+ .dev_get_data_busparams = NULL,
.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
.dev_init_card = kvaser_usb_leaf_init_card,
+ .dev_init_channel = kvaser_usb_leaf_init_channel,
+ .dev_remove_channel = kvaser_usb_leaf_remove_channel,
.dev_get_software_info = kvaser_usb_leaf_get_software_info,
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
- .dev_get_capabilities = NULL,
+ .dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 67c2ff407d06..ffa38f533c35 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -245,7 +245,8 @@ struct ucan_message_in {
/* CAN transmission complete
* (type == UCAN_IN_TX_COMPLETE)
*/
- struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
+ DECLARE_FLEX_ARRAY(struct ucan_tx_complete_entry_t,
+ can_tx_complete_msg);
} __aligned(0x4) msg;
} __packed __aligned(0x4);
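
The ucan change replaces the deprecated zero-length array ([0]) with a flexible array member. Because a flexible array may not be the only member of a struct, and this one lives inside a union, the kernel's DECLARE_FLEX_ARRAY() wraps it in an anonymous struct together with an empty placeholder member. A rough user-space approximation of the resulting shape, relying on the same GNU/C11 extensions the kernel builds with (names illustrative):

#include <stdint.h>

struct entry {
	uint32_t id;
	uint32_t info;
};

union payload {
	uint8_t raw[16];
	struct {
		struct { } __empty_entries;	/* zero-sized anchor member */
		struct entry entries[];		/* flexible array, legal here */
	};
};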
@@ -1581,7 +1582,7 @@ static void ucan_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (up) {
- unregister_netdev(up->netdev);
+ unregister_candev(up->netdev);
free_candev(up->netdev);
}
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 26a472d2ea58..4068d962203d 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -236,7 +236,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
netif_carrier_off(peer);
- err = rtnl_configure_link(peer, ifmp);
+ err = rtnl_configure_link(peer, ifmp, 0, NULL);
if (err < 0)
goto unregister_network_device;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 07507b4820d7..c26755f662c1 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -18,6 +18,7 @@ config NET_DSA_BCM_SF2
config NET_DSA_LOOP
tristate "DSA mock-up Ethernet switch chip support"
+ select NET_DSA_TAG_NONE
select FIXED_PHY
help
This enables support for a fake mock-up switch chip which
@@ -99,6 +100,7 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
+ select NET_DSA_TAG_NONE
select FIXED_PHY
select VITESSE_PHY
select GPIOLIB
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index 90b525160b71..ebaa4a80d544 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -2,6 +2,7 @@
menuconfig B53
tristate "Broadcom BCM53xx managed switch support"
depends on NET_DSA
+ select NET_DSA_TAG_NONE
select NET_DSA_TAG_BRCM
select NET_DSA_TAG_BRCM_LEGACY
select NET_DSA_TAG_BRCM_PREPEND
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 951f7935c872..595a548bb0a8 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1176,11 +1176,6 @@ static int hellcreek_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
- int ret;
-
- ret = devlink_info_driver_name_put(req, "hellcreek");
- if (ret)
- return ret;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 7d746cd9ca1b..1cb41c36bd47 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -29,8 +29,7 @@ static const struct regmap_config lan9303_i2c_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static int lan9303_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lan9303_i2c_probe(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev;
int ret;
@@ -106,7 +105,7 @@ static struct i2c_driver lan9303_i2c_driver = {
.name = "LAN9303_I2C",
.of_match_table = of_match_ptr(lan9303_i2c_of_match),
},
- .probe = lan9303_i2c_probe,
+ .probe_new = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
.shutdown = lan9303_i2c_shutdown,
.id_table = lan9303_i2c_id,
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 06b1efdb5e7d..913f83ef013c 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -3,6 +3,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
depends on NET_DSA
select NET_DSA_TAG_KSZ
+ select NET_DSA_TAG_NONE
help
This driver adds support for Microchip KSZ9477 series switch and
KSZ8795/KSZ88x3 switch chips.
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 8582b4b67d98..ea05abfbd51d 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -57,5 +57,6 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_detect(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
+int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index bd3b133e7085..003b0ac2854c 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -76,6 +76,57 @@ int ksz8_reset_switch(struct ksz_device *dev)
return 0;
}
+static int ksz8863_change_mtu(struct ksz_device *dev, int frame_size)
+{
+ u8 ctrl2 = 0;
+
+ if (frame_size <= KSZ8_LEGAL_PACKET_SIZE)
+ ctrl2 |= KSZ8863_LEGAL_PACKET_ENABLE;
+ else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ ctrl2 |= KSZ8863_HUGE_PACKET_ENABLE;
+
+ return ksz_rmw8(dev, REG_SW_CTRL_2, KSZ8863_LEGAL_PACKET_ENABLE |
+ KSZ8863_HUGE_PACKET_ENABLE, ctrl2);
+}
+
+static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size)
+{
+ u8 ctrl1 = 0, ctrl2 = 0;
+ int ret;
+
+ if (frame_size > KSZ8_LEGAL_PACKET_SIZE)
+ ctrl2 |= SW_LEGAL_PACKET_DISABLE;
+ else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ ctrl1 |= SW_HUGE_PACKET;
+
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1);
+ if (ret)
+ return ret;
+
+ return ksz_rmw8(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, ctrl2);
+}
+
+int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
+{
+ u16 frame_size;
+
+ if (!dsa_is_cpu_port(dev->ds, port))
+ return 0;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ switch (dev->chip_id) {
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ return ksz8795_change_mtu(dev, frame_size);
+ case KSZ8830_CHIP_ID:
+ return ksz8863_change_mtu(dev, frame_size);
+ }
+
+ return -EOPNOTSUPP;
+}
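
ksz8_change_mtu() converts the L3 MTU handed down by DSA into an Ethernet frame size before checking it against the chip limits, adding the VLAN-tagged header (18 bytes) and the FCS (4 bytes). With the default MTU of 1500 that gives 1522 bytes, just over the 1518-byte "legal" packet size, so the KSZ8795 variants disable the legal-packet check while the KSZ8863 clears both its legal-packet and huge-packet bits (1522 <= 1536). A trivial stand-alone check of the arithmetic:

#include <stdio.h>

#define DEMO_VLAN_ETH_HLEN	18	/* 14-byte header + 4-byte 802.1Q tag */
#define DEMO_ETH_FCS_LEN	4

int main(void)
{
	int mtu = 1500;
	int frame_size = mtu + DEMO_VLAN_ETH_HLEN + DEMO_ETH_FCS_LEN;

	printf("frame size for MTU %d: %d bytes\n", mtu, frame_size); /* 1522 */
	return 0;
}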
+
static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
{
u8 hi, lo;
@@ -1233,8 +1284,6 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
masks = dev->info->masks;
regs = dev->info->regs;
- /* Switch marks the maximum frame with extra byte as oversize. */
- ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
p = &dev->ports[dev->cpu_port];
@@ -1308,6 +1357,18 @@ int ksz8_setup(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
int i;
+ ds->mtu_enforcement_ingress = true;
+
+ /* We rely on software untagging on the CPU port, so that we
+ * can support both tagged and untagged VLANs
+ */
+ ds->untag_bridge_pvid = true;
+
+ /* VLAN filtering is partly controlled by the global VLAN
+ * Enable flag
+ */
+ ds->vlan_filtering_is_global = true;
+
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
/* Enable automatic fast aging when link changed detected. */
@@ -1367,16 +1428,6 @@ int ksz8_switch_init(struct ksz_device *dev)
dev->phy_port_cnt = dev->info->port_cnt - 1;
dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
- /* We rely on software untagging on the CPU port, so that we
- * can support both tagged and untagged VLANs
- */
- dev->ds->untag_bridge_pvid = true;
-
- /* VLAN filtering is partly controlled by the global VLAN
- * Enable flag
- */
- dev->ds->vlan_filtering_is_global = true;
-
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 77487d611824..7a57c6088f80 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -48,6 +48,9 @@
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define KSZ8863_HUGE_PACKET_ENABLE BIT(2)
+#define KSZ8863_LEGAL_PACKET_ENABLE BIT(1)
+
#define REG_SW_CTRL_3 0x05
#define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3)
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index ddb40838181e..2f4623f3bd85 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -152,11 +152,10 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
&regmap_smi[i], dev,
&rc);
if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&mdiodev->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz8863_regmap_config[i].val_bits, ret);
- return ret;
+ return dev_err_probe(&mdiodev->dev,
+ PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ ksz8863_regmap_config[i].val_bits);
}
}
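
Replacing the dev_err()/return pair with dev_err_probe() keeps probe error handling to a single statement and stays quiet for -EPROBE_DEFER while still recording the deferral reason. A generic sketch of the conversion (resource name and helper are placeholders, not this driver's symbols):

#include <linux/device.h>
#include <linux/err.h>

static int demo_probe_step(struct device *dev, void *resource)
{
	if (IS_ERR(resource)) {
		/* Old style:
		 *	ret = PTR_ERR(resource);
		 *	dev_err(dev, "failed to get resource: %d\n", ret);
		 *	return ret;
		 * New style, one call, silent on -EPROBE_DEFER:
		 */
		return dev_err_probe(dev, PTR_ERR(resource),
				     "failed to get resource\n");
	}

	return 0;
}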
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index a6a0321a8931..47b54ecf2c6f 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -45,24 +45,15 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
- u16 frame_size, max_frame = 0;
- int i;
-
- frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ u16 frame_size;
- /* Cache the per-port MTU setting */
- dev->ports[port].max_frame = frame_size;
+ if (!dsa_is_cpu_port(dev->ds, port))
+ return 0;
- for (i = 0; i < dev->info->port_cnt; i++)
- max_frame = max(max_frame, dev->ports[i].max_frame);
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
- REG_SW_MTU_MASK, max_frame);
-}
-
-int ksz9477_max_mtu(struct ksz_device *dev, int port)
-{
- return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ REG_SW_MTU_MASK, frame_size);
}
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
@@ -195,7 +186,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
/* KSZ9893 compatible chips do not support refclk configuration */
if (dev->chip_id == KSZ9893_CHIP_ID ||
- dev->chip_id == KSZ8563_CHIP_ID)
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
return 0;
data8 = SW_ENABLE_REFCLKO;
@@ -1142,6 +1134,8 @@ int ksz9477_setup(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
int ret = 0;
+ ds->mtu_enforcement_ingress = true;
+
/* Required for port partitioning. */
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
true);
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index 00862c4cfb7f..7c5bb3032772 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -50,7 +50,6 @@ int ksz9477_mdb_add(struct ksz_device *dev, int port,
int ksz9477_mdb_del(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
-int ksz9477_max_mtu(struct ksz_device *dev, int port);
void ksz9477_config_cpu_port(struct dsa_switch *ds);
int ksz9477_enable_stp_addr(struct ksz_device *dev);
int ksz9477_reset_switch(struct ksz_device *dev);
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 3763930dc6fc..c1a633ca1e6d 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -14,8 +14,7 @@
KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
-static int ksz9477_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int ksz9477_i2c_probe(struct i2c_client *i2c)
{
struct regmap_config rc;
struct ksz_device *dev;
@@ -30,17 +29,17 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
rc.lock_arg = &dev->regmap_mutex;
dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&i2c->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
+ return dev_err_probe(&i2c->dev, PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ ksz9477_regmap_config[i].val_bits);
}
}
if (i2c->dev.platform_data)
dev->pdata = i2c->dev.platform_data;
+ dev->irq = i2c->irq;
+
ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
@@ -101,7 +100,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
},
{
.compatible = "microchip,ksz9563",
- .data = &ksz_switch_chips[KSZ9893]
+ .data = &ksz_switch_chips[KSZ9563]
},
{
.compatible = "microchip,ksz8563",
@@ -120,7 +119,7 @@ static struct i2c_driver ksz9477_i2c_driver = {
.name = "ksz9477-switch",
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
- .probe = ksz9477_i2c_probe,
+ .probe_new = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
.shutdown = ksz9477_i2c_shutdown,
.id_table = ksz9477_i2c_id,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 53c68d286dd3..cc457fa64939 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1615,6 +1615,4 @@
#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
-#define KSZ9477_MAX_FRAME_SIZE 9000
-
#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index d612181b3226..423f944cc34c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -14,6 +14,7 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_mdio.h>
@@ -69,6 +70,43 @@ struct ksz_stats_raw {
u64 tx_discards;
};
+struct ksz88xx_stats_raw {
+ u64 rx;
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 tx;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
static const struct ksz_mib_names ksz88xx_mib_names[] = {
{ 0x00, "rx" },
{ 0x01, "rx_hi" },
@@ -155,6 +193,7 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.w_phy = ksz8_w_phy,
.r_mib_cnt = ksz8_r_mib_cnt,
.r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz88xx_r_mib_stats64,
.freeze_mib = ksz8_freeze_mib,
.port_init_cnt = ksz8_port_init_cnt,
.fdb_dump = ksz8_fdb_dump,
@@ -171,6 +210,7 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.reset = ksz8_reset_switch,
.init = ksz8_switch_init,
.exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
};
static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
@@ -206,7 +246,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
- .max_mtu = ksz9477_max_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -243,7 +282,6 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
- .max_mtu = ksz9477_max_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -1039,6 +1077,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
+ .port_nirqs = 3,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1282,6 +1321,31 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.gbit_capable = {true, true, true},
},
+ [KSZ9563] = {
+ .chip_id = KSZ9563_CHIP_ID,
+ .dev_name = "KSZ9563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 3,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
+ },
+
[KSZ9567] = {
.chip_id = KSZ9567_CHIP_ID,
.dev_name = "KSZ9567",
@@ -1557,6 +1621,55 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
spin_unlock(&mib->stats64_lock);
}
+void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct ethtool_pause_stats *pstats;
+ struct rtnl_link_stats64 *stats;
+ struct ksz88xx_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ pstats = &mib->pause_stats;
+ raw = (struct ksz88xx_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
+
+ /* HW counters are counting bytes + FCS which is not acceptable
+ * for rtnl_link_stats64 interface
+ */
+ stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
+ spin_unlock(&mib->stats64_lock);
+}
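
As the in-code comment notes, the KSZ88xx byte counters include the 4-byte FCS of every frame, which rtnl_link_stats64 does not expect, so the helper subtracts ETH_FCS_LEN once per counted packet. A worked example with made-up counter values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t hw_rx_bytes = 154400;	/* as counted by the switch, FCS included */
	uint64_t rx_packets  = 100;

	printf("rx_bytes = %llu\n",
	       (unsigned long long)(hw_rx_bytes - rx_packets * 4)); /* 154000 */
	return 0;
}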
+
static void ksz_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
@@ -2389,7 +2502,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
if (dev->chip_id == KSZ8830_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
- dev->chip_id == KSZ9893_CHIP_ID)
+ dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
if (dev->chip_id == KSZ9477_CHIP_ID ||
@@ -2473,10 +2587,29 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
- if (!dev->dev_ops->max_mtu)
- return -EOPNOTSUPP;
+ switch (dev->chip_id) {
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8830_CHIP_ID:
+ return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ }
- return dev->dev_ops->max_mtu(dev, port);
+ return -EOPNOTSUPP;
}
static void ksz_set_xmii(struct ksz_device *dev, int port,
@@ -2509,7 +2642,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port,
data8 |= bitval[P_RGMII_SEL];
/* On KSZ9893, disable RGMII in-band status support */
if (dev->chip_id == KSZ9893_CHIP_ID ||
- dev->chip_id == KSZ8563_CHIP_ID)
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
data8 &= ~P_MII_MAC_MODE;
break;
default:
@@ -2782,6 +2916,8 @@ static int ksz_switch_detect(struct ksz_device *dev)
if (id4 == SKU_ID_KSZ8563)
dev->chip_id = KSZ8563_CHIP_ID;
+ else if (id4 == SKU_ID_KSZ9563)
+ dev->chip_id = KSZ9563_CHIP_ID;
else
dev->chip_id = KSZ9893_CHIP_ID;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 9cfa179575ce..055d61ff3fb8 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -95,7 +95,6 @@ struct ksz_port {
struct ksz_port_mib mib;
phy_interface_t interface;
- u16 max_frame;
u32 rgmii_tx_val;
u32 rgmii_rx_val;
struct ksz_device *ksz_dev;
@@ -154,6 +153,7 @@ enum ksz_model {
KSZ9896,
KSZ9897,
KSZ9893,
+ KSZ9563,
KSZ9567,
LAN9370,
LAN9371,
@@ -172,6 +172,7 @@ enum ksz_chip_id {
KSZ9896_CHIP_ID = 0x00989600,
KSZ9897_CHIP_ID = 0x00989700,
KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9563_CHIP_ID = 0x00956300,
KSZ9567_CHIP_ID = 0x00956700,
LAN9370_CHIP_ID = 0x00937000,
LAN9371_CHIP_ID = 0x00937100,
@@ -320,7 +321,6 @@ struct ksz_dev_ops {
void (*get_caps)(struct ksz_device *dev, int port,
struct phylink_config *config);
int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
- int (*max_mtu)(struct ksz_device *dev, int port);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
void (*phylink_mac_config)(struct ksz_device *dev, int port,
@@ -345,6 +345,7 @@ void ksz_switch_remove(struct ksz_device *dev);
void ksz_init_mib_timer(struct ksz_device *dev);
void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port);
void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
bool ksz_get_gbit(struct ksz_device *dev, int port);
phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit);
@@ -454,6 +455,11 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
return regmap_bulk_write(dev->regmap[2], reg, val, 2);
}
+static inline int ksz_rmw8(struct ksz_device *dev, int offset, u8 mask, u8 val)
+{
+ return regmap_update_bits(dev->regmap[0], offset, mask, val);
+}
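
ksz_rmw8() is a thin wrapper around regmap_update_bits(): read the register, clear the bits in mask, OR in (val & mask), and write the result back (regmap skips the write when nothing changed). The KSZ8795/KSZ8863 MTU helpers earlier in this series use it to touch only the packet-size control bits. A plain-C illustration of the bit manipulation with example values:

#include <stdio.h>
#include <stdint.h>

static uint8_t rmw8(uint8_t old, uint8_t mask, uint8_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	/* Clear bit 1, set bit 2, leave everything else alone. */
	uint8_t reg = 0x03;
	uint8_t out = rmw8(reg, 0x06, 0x04);

	printf("0x%02x -> 0x%02x\n", reg, out);	/* 0x03 -> 0x05 */
	return 0;
}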
+
static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
u8 *data)
{
@@ -551,6 +557,7 @@ static inline int is_lan937x(struct ksz_device *dev)
/* KSZ9893, KSZ9563, KSZ8563 specific register */
#define REG_CHIP_ID4 0x0f
#define SKU_ID_KSZ8563 0x3c
+#define SKU_ID_KSZ9563 0x1c
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROT_RATE 10
@@ -585,6 +592,12 @@ static inline int is_lan937x(struct ksz_device *dev)
#define PORT_SRC_PHY_INT 1
+#define KSZ8795_HUGE_PACKET_SIZE 2000
+#define KSZ8863_HUGE_PACKET_SIZE 1916
+#define KSZ8863_NORMAL_PACKET_SIZE 1536
+#define KSZ8_LEGAL_PACKET_SIZE 1518
+#define KSZ9477_MAX_FRAME_SIZE 9000
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 1b6ab891b986..96c52e8fb51b 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -71,11 +71,9 @@ static int ksz_spi_probe(struct spi_device *spi)
dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- regmap_config[i].val_bits, ret);
- return ret;
+ return dev_err_probe(&spi->dev, PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ regmap_config[i].val_bits);
}
}
@@ -163,7 +161,7 @@ static const struct of_device_id ksz_dt_ids[] = {
},
{
.compatible = "microchip,ksz9563",
- .data = &ksz_switch_chips[KSZ9893]
+ .data = &ksz_switch_chips[KSZ9563]
},
{
.compatible = "microchip,ksz8563",
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index 7e4f307a0387..06d3d0308cba 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -242,7 +242,11 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
}
/* Write the frame size in PORT_MAX_FR_SIZE register */
- ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+ ret = ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+ if (ret) {
+ dev_err(ds->dev, "failed to update mtu for port %d\n", port);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 937cb22cb3d4..ba4fff8690aa 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -4077,6 +4077,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -4349,6 +4350,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5032,6 +5034,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -5076,6 +5079,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 1266eabee086..a08dab75e0c0 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -821,11 +821,6 @@ int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- err = devlink_info_driver_name_put(req, "mv88e6xxx");
- if (err)
- return err;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 5c4195c635b0..f79cf716c541 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -133,6 +133,15 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
}
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 2 && port != 5 && port != 6)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
{
u16 reg;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index cb04243f37c1..aec9d4fd20e3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -332,6 +332,8 @@ int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
int pause);
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index dd3a18cc89dd..3b738cb2ae6e 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1048,21 +1048,14 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
*/
config->legacy_pre_march2020 = false;
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD;
+
__set_bit(ocelot->ports[port]->phy_mode,
config->supported_interfaces);
}
-static void felix_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
-
- if (felix->info->phylink_validate)
- felix->info->phylink_validate(ocelot, port, supported, state);
-}
-
static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
int port,
phy_interface_t iface)
@@ -1370,7 +1363,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
ocelot->map = felix->info->map;
- ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->vcap_pol.base = felix->info->vcap_pol_base;
@@ -2050,7 +2042,6 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_get_caps = felix_phylink_get_caps,
- .phylink_validate = felix_phylink_validate,
.phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index c9c29999c336..be22d6ccd7c8 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -28,7 +28,6 @@ struct felix_info {
const struct ocelot_ops *ops;
const u32 *port_modes;
int num_mact_rows;
- const struct ocelot_stat_layout *stats_layout;
int num_ports;
int num_tx_queues;
struct vcap_props *vcap;
@@ -52,9 +51,6 @@ struct felix_info {
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
- void (*phylink_validate)(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 26a35ae322d1..01ac70fd7ddf 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -565,10 +565,6 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
- OCELOT_COMMON_STATS,
-};
-
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 3},
[VCAP_ES0_IGR_PORT] = { 3, 3},
@@ -885,35 +881,6 @@ static int vsc9959_reset(struct ocelot *ocelot)
return 0;
}
-static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
@@ -2575,7 +2542,6 @@ static const struct felix_info felix_info_vsc9959 = {
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
- .stats_layout = vsc9959_stats_layout,
.vcap = vsc9959_vcap_props,
.vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
@@ -2588,7 +2554,6 @@ static const struct felix_info felix_info_vsc9959 = {
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
- .phylink_validate = vsc9959_phylink_validate,
.port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 7af33b2c685d..88ed3a2e487a 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -543,10 +543,6 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
- OCELOT_COMMON_STATS,
-};
-
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4},
[VCAP_ES0_IGR_PORT] = { 4, 4},
@@ -840,32 +836,6 @@ static int vsc9953_reset(struct ocelot *ocelot)
return 0;
}
-static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
/* Watermark encode
* Bit 9: Unit; 0:1, 1:16
* Bit 8-0: Value to be multiplied with unit
@@ -996,7 +966,6 @@ static const struct felix_info seville_info_vsc9953 = {
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
- .stats_layout = vsc9953_stats_layout,
.vcap = vsc9953_vcap_props,
.vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
@@ -1007,7 +976,6 @@ static const struct felix_info seville_info_vsc9953 = {
.num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
- .phylink_validate = vsc9953_phylink_validate,
.port_modes = vsc9953_port_modes,
};
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index bdbbff2a7909..30b1f1ba762f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -122,16 +122,10 @@ int sja1105_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- int rc;
-
- rc = devlink_info_driver_name_put(req, "sja1105");
- if (rc)
- return rc;
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
- priv->info->name);
- return rc;
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
}
int sja1105_devlink_setup(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 54065cdedd35..14ff6887a225 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -76,8 +76,7 @@ static const struct regmap_config xrs700x_i2c_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_BIG
};
-static int xrs700x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int xrs700x_i2c_probe(struct i2c_client *i2c)
{
struct xrs700x *priv;
int ret;
@@ -148,7 +147,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
.name = "xrs700x-i2c",
.of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
},
- .probe = xrs700x_i2c_probe,
+ .probe_new = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
.shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index aa0fc00faecb..c4b1b0aa438a 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -99,14 +99,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_change_carrier = dummy_change_carrier,
};
-static void dummy_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strscpy(info->driver, DRV_NAME, sizeof(info->driver));
-}
-
static const struct ethtool_ops dummy_ethtool_ops = {
- .get_drvinfo = dummy_get_drvinfo,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 1917da784191..323ec56e8a74 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -132,16 +132,6 @@ source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
-
-config FEALNX
- tristate "Myson MTD-8xx PCI Ethernet support"
- depends on PCI
- select CRC32
- select MII
- help
- Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
- cards. <http://www.myson.com.tw/>
-
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 0d872d4efcd1..2fedbaa545eb 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -64,7 +64,6 @@ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
-obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 606c97610808..0805f249fff2 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -196,7 +196,7 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
{
u32 header_len = ADIN1110_RD_HEADER_LEN;
u32 read_len = ADIN1110_REG_LEN;
- struct spi_transfer t[2] = {0};
+ struct spi_transfer t = {0};
int ret;
priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
@@ -209,17 +209,15 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
header_len++;
}
- t[0].tx_buf = &priv->data[0];
- t[0].len = header_len;
-
if (priv->append_crc)
read_len++;
memset(&priv->data[header_len], 0, read_len);
- t[1].rx_buf = &priv->data[header_len];
- t[1].len = read_len;
+ t.tx_buf = &priv->data[0];
+ t.rx_buf = &priv->data[0];
+ t.len = read_len + header_len;
- ret = spi_sync_transfer(priv->spidev, t, 2);
+ ret = spi_sync_transfer(priv->spidev, &t, 1);
if (ret)
return ret;
@@ -296,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
{
struct adin1110_priv *priv = port_priv->priv;
u32 header_len = ADIN1110_RD_HEADER_LEN;
- struct spi_transfer t[2] = {0};
+ struct spi_transfer t = {0};
u32 frame_size_no_fcs;
struct sk_buff *rxb;
u32 frame_size;
@@ -327,12 +325,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
return ret;
frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
-
- rxb = netdev_alloc_skb(port_priv->netdev, round_len);
- if (!rxb)
- return -ENOMEM;
-
- memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN);
+ memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
@@ -342,21 +335,23 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
header_len++;
}
- skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN);
+ rxb = netdev_alloc_skb(port_priv->netdev, round_len + header_len);
+ if (!rxb)
+ return -ENOMEM;
- t[0].tx_buf = &priv->data[0];
- t[0].len = header_len;
+ skb_put(rxb, frame_size_no_fcs + header_len + ADIN1110_FRAME_HEADER_LEN);
- t[1].rx_buf = &rxb->data[0];
- t[1].len = round_len;
+ t.tx_buf = &priv->data[0];
+ t.rx_buf = &rxb->data[0];
+ t.len = header_len + round_len;
- ret = spi_sync_transfer(priv->spidev, t, 2);
+ ret = spi_sync_transfer(priv->spidev, &t, 1);
if (ret) {
kfree_skb(rxb);
return ret;
}
- skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN);
+ skb_pull(rxb, header_len + ADIN1110_FRAME_HEADER_LEN);
rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
@@ -1087,9 +1082,30 @@ static void adin1110_adjust_link(struct net_device *dev)
*/
static int adin1110_check_spi(struct adin1110_priv *priv)
{
+ struct gpio_desc *reset_gpio;
int ret;
u32 val;
+ reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (reset_gpio) {
+ /* MISO pin is used for internal configuration, can't have
+ * anyone else disturbing the SDO line.
+ */
+ spi_bus_lock(priv->spidev->controller);
+
+ gpiod_set_value(reset_gpio, 1);
+ fsleep(10000);
+ gpiod_set_value(reset_gpio, 0);
+
+ /* Need to wait 90 ms before interacting with
+ * the MAC after a HW reset.
+ */
+ fsleep(90000);
+
+ spi_bus_unlock(priv->spidev->controller);
+ }
+
ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 4eecbdfff3ff..82071d0e5f7f 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -288,13 +288,13 @@ do { \
u64_stats_update_end(&(st)->syncp); \
} while (0)
-#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
-{ \
- unsigned int start; \
+#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
+{ \
+ unsigned int start; \
do { \
- start = u64_stats_fetch_begin_irq(&(st)->syncp); \
- newst = (st)->counter; \
- } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \
+ start = u64_stats_fetch_begin(&(st)->syncp); \
+ newst = (st)->counter; \
+ } while (u64_stats_fetch_retry(&(st)->syncp, start)); \
}
struct slic_upr {
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 711d5b5a4c49..66e3af73ec41 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1096,7 +1096,6 @@ static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
}
static const struct phylink_mac_ops alt_tse_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_an_restart = alt_tse_mac_an_restart,
.mac_config = alt_tse_mac_config,
.mac_link_down = alt_tse_mac_link_down,
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 98d6386b7f39..48ae6d810f8f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -118,9 +118,9 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
*(dst) = *src;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
}
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 5a454b58498f..a95529a69cbb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3268,10 +3268,10 @@ static void ena_get_stats64(struct net_device *netdev,
tx_ring = &adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+ start = u64_stats_fetch_begin(&tx_ring->syncp);
packets = tx_ring->tx_stats.cnt;
bytes = tx_ring->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
@@ -3279,20 +3279,20 @@ static void ena_get_stats64(struct net_device *netdev,
rx_ring = &adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+ start = u64_stats_fetch_begin(&rx_ring->syncp);
packets = rx_ring->rx_stats.cnt;
bytes = rx_ring->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
do {
- start = u64_stats_fetch_begin_irq(&adapter->syncp);
+ start = u64_stats_fetch_begin(&adapter->syncp);
rx_drops = adapter->dev_stats.rx_drops;
tx_drops = adapter->dev_stats.tx_drops;
- } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
+ } while (u64_stats_fetch_retry(&adapter->syncp, start));
stats->rx_dropped = rx_drops;
stats->tx_dropped = tx_drops;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index d06d260cf1e2..7051bd7cf6dc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -134,27 +134,15 @@ static u64 xgbe_cc_read(const struct cyclecounter *cc)
return nsec;
}
-static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 adjust;
- u32 addend, diff;
- unsigned int neg_adjust = 0;
+ u64 addend;
- if (delta < 0) {
- neg_adjust = 1;
- delta = -delta;
- }
-
- adjust = pdata->tstamp_addend;
- adjust *= delta;
- diff = div_u64(adjust, 1000000000UL);
-
- addend = (neg_adjust) ? pdata->tstamp_addend - diff :
- pdata->tstamp_addend + diff;
+ addend = adjust_by_scaled_ppm(pdata->tstamp_addend, scaled_ppm);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
@@ -235,7 +223,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
netdev_name(pdata->netdev));
info->owner = THIS_MODULE;
info->max_adj = pdata->ptpclk_rate;
- info->adjfreq = xgbe_adjfreq;
+ info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
info->gettime64 = xgbe_gettime;
info->settime64 = xgbe_settime;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 25129e723b57..1e8d902e1c8e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -934,7 +934,7 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
do {
count = 0;
- start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
+ start = u64_stats_fetch_begin(&self->stats.rx.syncp);
data[count] = self->stats.rx.packets;
data[++count] = self->stats.rx.jumbo_packets;
data[++count] = self->stats.rx.lro_packets;
@@ -951,15 +951,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
data[++count] = self->stats.rx.xdp_tx;
data[++count] = self->stats.rx.xdp_invalid;
data[++count] = self->stats.rx.xdp_redirect;
- } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
+ } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
do {
count = 0;
- start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
+ start = u64_stats_fetch_begin(&self->stats.tx.syncp);
data[count] = self->stats.tx.packets;
data[++count] = self->stats.tx.queue_restarts;
- } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
+ } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start));
}
return ++count;
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index 8b7cdf015a16..21376c79f671 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -662,12 +662,12 @@ static void ax88796c_get_stats64(struct net_device *ndev,
s = per_cpu_ptr(ax_local->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&s->syncp);
+ start = u64_stats_fetch_begin(&s->syncp);
rx_packets = u64_stats_read(&s->rx_packets);
rx_bytes = u64_stats_read(&s->rx_bytes);
tx_packets = u64_stats_read(&s->tx_packets);
tx_bytes = u64_stats_read(&s->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+ } while (u64_stats_fetch_retry(&s->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 4a1efe9b37d0..ff1a5edf8df1 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1086,7 +1086,6 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
- .validate = phylink_generic_validate,
.mac_config = ag71xx_mac_config,
.mac_link_down = ag71xx_mac_link_down,
.mac_link_up = ag71xx_mac_link_up,
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 7f876721596c..b751dc8486dc 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1680,7 +1680,7 @@ static void b44_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+ start = u64_stats_fetch_begin(&hwstat->syncp);
/* Convert HW stats into rtnl_link_stats64 stats. */
nstat->rx_packets = hwstat->rx_pkts;
@@ -1714,7 +1714,7 @@ static void b44_get_stats64(struct net_device *dev,
/* Carrier lost counter seems to be broken for some devices */
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
}
@@ -2082,12 +2082,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
do {
data_src = &hwstat->tx_good_octets;
data_dst = data;
- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+ start = u64_stats_fetch_begin(&hwstat->syncp);
for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
*data_dst++ = *data_src++;
- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index a737b1913cf9..33d86683af50 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -36,13 +36,24 @@
#define ENET_MAX_ETH_OVERHEAD (ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
ETH_FCS_LEN + 4) /* 32 */
+#define ENET_RX_SKB_BUF_SIZE (NET_SKB_PAD + NET_IP_ALIGN + \
+ ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
+ ENET_MTU_MAX + ETH_FCS_LEN + 4)
+#define ENET_RX_SKB_BUF_ALLOC_SIZE (SKB_DATA_ALIGN(ENET_RX_SKB_BUF_SIZE) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define ENET_RX_BUF_DMA_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define ENET_RX_BUF_DMA_SIZE (ENET_RX_SKB_BUF_SIZE - ENET_RX_BUF_DMA_OFFSET)
+
struct bcm4908_enet_dma_ring_bd {
__le32 ctl;
__le32 addr;
} __packed;
struct bcm4908_enet_dma_ring_slot {
- struct sk_buff *skb;
+ union {
+ void *buf; /* RX */
+ struct sk_buff *skb; /* TX */
+ };
unsigned int len;
dma_addr_t dma_addr;
};
@@ -260,22 +271,21 @@ static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int
u32 tmp;
int err;
- slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD;
-
- slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
- if (!slot->skb)
+ slot->buf = napi_alloc_frag(ENET_RX_SKB_BUF_ALLOC_SIZE);
+ if (!slot->buf)
return -ENOMEM;
- slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+ slot->dma_addr = dma_map_single(dev, slot->buf + ENET_RX_BUF_DMA_OFFSET,
+ ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);
err = dma_mapping_error(dev, slot->dma_addr);
if (err) {
dev_err(dev, "Failed to map DMA buffer: %d\n", err);
- kfree_skb(slot->skb);
- slot->skb = NULL;
+ skb_free_frag(slot->buf);
+ slot->buf = NULL;
return err;
}
- tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp = ENET_RX_BUF_DMA_SIZE << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
tmp |= DMA_CTL_STATUS_OWN;
if (idx == enet->rx_ring.length - 1)
tmp |= DMA_CTL_STATUS_WRAP;
@@ -315,11 +325,11 @@ static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
for (i = rx_ring->length - 1; i >= 0; i--) {
slot = &rx_ring->slots[i];
- if (!slot->skb)
+ if (!slot->buf)
continue;
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
- kfree_skb(slot->skb);
- slot->skb = NULL;
+ skb_free_frag(slot->buf);
+ slot->buf = NULL;
}
}
@@ -495,6 +505,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
netif_carrier_off(netdev);
napi_disable(&rx_ring->napi);
napi_disable(&tx_ring->napi);
+ netdev_reset_queue(netdev);
bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
@@ -554,6 +565,8 @@ static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_devic
if (ring->write_idx + 1 == ring->length - 1)
tmp |= DMA_CTL_STATUS_WRAP;
+ netdev_sent_queue(enet->netdev, skb->len);
+
buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
buf_desc->ctl = cpu_to_le32(tmp);
@@ -575,6 +588,7 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
while (handled < weight) {
struct bcm4908_enet_dma_ring_bd *buf_desc;
struct bcm4908_enet_dma_ring_slot slot;
+ struct sk_buff *skb;
u32 ctl;
int len;
int err;
@@ -598,16 +612,24 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
if (len < ETH_ZLEN ||
(ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
- kfree_skb(slot.skb);
+ skb_free_frag(slot.buf);
enet->netdev->stats.rx_dropped++;
break;
}
- dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, slot.dma_addr, ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);
+
+ skb = build_skb(slot.buf, ENET_RX_SKB_BUF_ALLOC_SIZE);
+ if (unlikely(!skb)) {
+ skb_free_frag(slot.buf);
+ enet->netdev->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb, ENET_RX_BUF_DMA_OFFSET);
+ skb_put(skb, len - ETH_FCS_LEN);
+ skb->protocol = eth_type_trans(skb, enet->netdev);
- skb_put(slot.skb, len - ETH_FCS_LEN);
- slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
- netif_receive_skb(slot.skb);
+ netif_receive_skb(skb);
enet->netdev->stats.rx_packets++;
enet->netdev->stats.rx_bytes += len;
@@ -652,6 +674,7 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
tx_ring->read_idx = 0;
}
+ netdev_completed_queue(enet->netdev, handled, bytes);
enet->netdev->stats.tx_packets += handled;
enet->netdev->stats.tx_bytes += bytes;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 425d6ccd5413..38d0cdaf22a5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -295,6 +295,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+ /* RDMA misc statistics */
+ STAT_RDMA("rdma_ovflow_cnt", mib.rdma_ovflow_cnt, RDMA_OVFL_DISC_CNTR),
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
@@ -333,6 +335,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
case BCM_SYSPORT_STAT_NETDEV64:
case BCM_SYSPORT_STAT_RXCHK:
case BCM_SYSPORT_STAT_RBUF:
+ case BCM_SYSPORT_STAT_RDMA:
case BCM_SYSPORT_STAT_SOFT:
return true;
default:
@@ -436,6 +439,14 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
if (val == ~0)
rbuf_writel(priv, 0, s->reg_offset);
break;
+ case BCM_SYSPORT_STAT_RDMA:
+ if (!priv->is_lite)
+ continue;
+
+ val = rdma_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rdma_writel(priv, 0, s->reg_offset);
+ break;
}
j += s->stat_sizeof;
@@ -457,10 +468,10 @@ static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
for (q = 0; q < priv->netdev->num_tx_queues; q++) {
ring = &priv->tx_rings[q];
do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
+ start = u64_stats_fetch_begin(&priv->syncp);
bytes = ring->bytes;
packets = ring->packets;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
*tx_bytes += bytes;
*tx_packets += packets;
@@ -504,9 +515,9 @@ static void bcm_sysport_get_stats(struct net_device *dev,
if (s->stat_sizeof == sizeof(u64) &&
s->type == BCM_SYSPORT_STAT_NETDEV64) {
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
data[i] = *(u64 *)p;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
} else
data[i] = *(u32 *)p;
j++;
@@ -1878,10 +1889,10 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
&stats->tx_packets);
do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
+ start = u64_stats_fetch_begin(&priv->syncp);
stats->rx_packets = stats64->rx_packets;
stats->rx_bytes = stats64->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 5af16e5f9ad0..335cf6631db5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -290,6 +290,7 @@ struct bcm_rsb {
#define RDMA_WRITE_PTR_HI 0x1010
#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_OVFL_DISC_CNTR 0x1018
#define RDMA_PROD_INDEX 0x1018
#define RDMA_PROD_INDEX_MASK 0xffff
@@ -565,6 +566,7 @@ struct bcm_sysport_mib {
u32 rxchk_other_pkt_disc;
u32 rbuf_ovflow_cnt;
u32 rbuf_err_cnt;
+ u32 rdma_ovflow_cnt;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 tx_dma_failed;
@@ -581,6 +583,7 @@ enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_RUNT,
BCM_SYSPORT_STAT_RXCHK,
BCM_SYSPORT_STAT_RBUF,
+ BCM_SYSPORT_STAT_RDMA,
BCM_SYSPORT_STAT_SOFT,
};
@@ -627,6 +630,14 @@ enum bcm_sysport_stat_type {
.reg_offset = ofs, \
}
+#define STAT_RDMA(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RDMA, \
+ .reg_offset = ofs, \
+}
+
/* TX bytes and packets */
#define NUM_SYSPORT_TXQ_STAT 2
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index fec57f1982c8..dbe310144780 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5415,8 +5415,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
bp->rx_buf_use_size = rx_size;
/* hw alignment + build_skb() overhead*/
- bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
- NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->rx_buf_size = kmalloc_size_roundup(
+ SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 51b1690fd045..5d1e4fe335aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13671,19 +13671,20 @@ static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
return bnx2x_func_state_change(bp, &func_params);
}
-static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int bnx2x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
int rc;
int drift_dir = 1;
int val, period, period1, period2, dif, dif1, dif2;
int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
- DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+ DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb);
if (!netif_running(bp->dev)) {
DP(BNX2X_MSG_PTP,
- "PTP adjfreq called while the interface is down\n");
+ "PTP adjfine called while the interface is down\n");
return -ENETDOWN;
}
@@ -13818,7 +13819,7 @@ void bnx2x_register_phc(struct bnx2x *bp)
bp->ptp_clock_info.n_ext_ts = 0;
bp->ptp_clock_info.n_per_out = 0;
bp->ptp_clock_info.pps = 0;
- bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+ bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine;
bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9f8a6ce4b356..0fe164b42c5d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5250,7 +5250,7 @@ int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
return 1;
}
-static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
u16 i, j;
@@ -5263,8 +5263,8 @@ static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
}
}
-static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
- struct bnxt_vnic_info *vnic)
+static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
__le16 *ring_tbl = vnic->rss_table;
struct bnxt_rx_ring_info *rxr;
@@ -5285,12 +5285,27 @@ static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
}
}
-static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static void
+__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
+ struct bnxt_vnic_info *vnic)
{
if (bp->flags & BNXT_FLAG_CHIP_P5)
- __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+ bnxt_fill_hw_rss_tbl_p5(bp, vnic);
else
- __bnxt_fill_hw_rss_tbl(bp, vnic);
+ bnxt_fill_hw_rss_tbl(bp, vnic);
+
+ if (bp->rss_hash_delta) {
+ req->hash_type = cpu_to_le32(bp->rss_hash_delta);
+ if (bp->rss_hash_cfg & bp->rss_hash_delta)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
+ else
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
+ } else {
+ req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ }
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
@@ -5307,14 +5322,8 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
if (rc)
return rc;
- if (set_rss) {
- bnxt_fill_hw_rss_tbl(bp, vnic);
- req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
- req->hash_key_tbl_addr =
- cpu_to_le64(vnic->rss_hash_key_dma_addr);
- }
+ if (set_rss)
+ __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
return hwrm_req_send(bp, req);
}
@@ -5335,10 +5344,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
if (!set_rss)
return hwrm_req_send(bp, req);
- bnxt_fill_hw_rss_tbl(bp, vnic);
- req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
ring_tbl_map = vnic->rss_table_dma_addr;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
@@ -5357,6 +5363,25 @@ exit:
return rc;
}
+static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+
+ if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ /* all contexts configured to same hash_type, zero always exists */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = hwrm_req_hold(bp, req);
+ if (!hwrm_req_send(bp, req)) {
+ bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
+ bp->rss_hash_delta = 0;
+ }
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5614,6 +5639,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
(BNXT_CHIP_P5_THOR(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2) {
if (BNXT_CHIP_P5_THOR(bp))
@@ -6958,8 +6985,11 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
}
- if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
+ if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) {
bp->flags |= BNXT_FLAG_MULTI_HOST;
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC;
+ }
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
@@ -8808,6 +8838,8 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
+ if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ bnxt_hwrm_update_rss_hash_cfg(bp);
if (bp->flags & BNXT_FLAG_RFS) {
rc = bnxt_alloc_rfs_vnics(bp);
@@ -12252,6 +12284,8 @@ static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ bp->rss_hash_delta = bp->rss_hash_cfg;
if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
@@ -13082,13 +13116,6 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return 0;
}
-static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
-{
- struct bnxt *bp = netdev_priv(dev);
-
- return &bp->dl_port;
-}
-
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -13120,7 +13147,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
- .ndo_get_devlink_port = bnxt_get_devlink_port,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -13131,9 +13157,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- if (BNXT_PF(bp))
- devlink_port_type_clear(&bp->dl_port);
-
bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
@@ -13546,6 +13569,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
bp->board_idx = ent->driver_data;
bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
@@ -13721,8 +13745,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_cleanup;
- if (BNXT_PF(bp))
- devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
bnxt_print_device_info(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d5fa43cfe524..41c6dd0ae447 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1901,6 +1901,7 @@ struct bnxt {
u16 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
+ u32 rss_hash_delta;
u16 max_mtu;
u8 max_tc;
@@ -1966,6 +1967,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
+ #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA 0x00080000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
#define BNXT_FW_CAP_PTP_RTC 0x00400000
@@ -2117,6 +2119,7 @@ struct bnxt {
#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 8a6f788f6294..26913dc816d3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -892,10 +892,6 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
u32 ver = 0;
int rc;
- rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
- if (rc)
- return rc;
-
if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8cad15c458b3..cbf17fcfb7ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1234,6 +1234,8 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg == rss_hash_cfg)
return 0;
+ if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
bp->rss_hash_cfg = rss_hash_cfg;
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
@@ -2005,6 +2007,14 @@ static void bnxt_get_fec_stats(struct net_device *dev,
rx = bp->rx_port_stats_ext.sw_stats;
fec_stats->corrected_bits.total =
*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
+
+ if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
+ return;
+
+ fec_stats->corrected_blocks.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
+ fec_stats->uncorrectable_blocks.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
}
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
@@ -2514,6 +2524,7 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
+#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
@@ -2564,6 +2575,32 @@ static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
+static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
+ struct netlink_ext_ack *extack)
+{
+ u32 item_len;
+ int rc;
+
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
+ &item_len, NULL);
+ if (rc) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
+ return rc;
+ }
+
+ if (fw_size > item_len) {
+ rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, 0, 1,
+ round_up(fw_size, 4096), NULL, 0);
+ if (rc) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
+ return rc;
+ }
+ }
+ return 0;
+}
+
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
u32 install_type, struct netlink_ext_ack *extack)
{
@@ -2580,6 +2617,11 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
u16 index;
int rc;
+ /* resize before flashing larger image than available space */
+ rc = bnxt_resize_update_entry(dev, fw->size, extack);
+ if (rc)
+ return rc;
+
bnxt_hwrm_fw_set_time(bp);
rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
@@ -3146,8 +3188,9 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
- u16 page_number, u16 start_addr,
- u16 data_length, u8 *buf)
+ u16 page_number, u8 bank,
+ u16 start_addr, u16 data_length,
+ u8 *buf)
{
struct hwrm_port_phy_i2c_read_output *output;
struct hwrm_port_phy_i2c_read_input *req;
@@ -3168,8 +3211,13 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
data_length -= xfer_size;
req->page_offset = cpu_to_le16(start_addr + byte_offset);
req->data_length = xfer_size;
- req->enables = cpu_to_le32(start_addr + byte_offset ?
- PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+ req->enables =
+ cpu_to_le32((start_addr + byte_offset ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
+ 0) |
+ (bank ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
+ 0));
rc = hwrm_req_send(bp, req);
if (!rc)
memcpy(buf + byte_offset, output->data, xfer_size);
@@ -3199,7 +3247,7 @@ static int bnxt_get_module_info(struct net_device *dev,
if (bp->hwrm_spec_code < 0x10202)
return -EOPNOTSUPP;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
SFF_DIAG_SUPPORT_OFFSET + 1,
data);
if (!rc) {
@@ -3244,7 +3292,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
if (start < ETH_MODULE_SFF_8436_LEN) {
if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
length = ETH_MODULE_SFF_8436_LEN - start;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
start, length, data);
if (rc)
return rc;
@@ -3256,12 +3304,68 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
start, length, data);
}
return rc;
}
+static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ if (bp->link_info.module_status <=
+ PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+ return 0;
+
+ switch (bp->link_info.module_status) {
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
+ break;
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
+ break;
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown error");
+ break;
+ }
+ return -EINVAL;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_get_module_status(bp, extack);
+ if (rc)
+ return rc;
+
+ if (bp->hwrm_spec_code < 0x10202) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
+ return -EINVAL;
+ }
+
+ if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
+ return -EINVAL;
+ }
+
+ rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
+ page_data->page, page_data->bank,
+ page_data->offset,
+ page_data->length,
+ page_data->data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -4018,6 +4122,20 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
*ranges = bnxt_rmon_ranges;
}
+static void bnxt_get_link_ext_stats(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return;
+
+ rx = bp->rx_port_stats_ext.sw_stats;
+ stats->link_down_events =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
+}
+
void bnxt_ethtool_free(struct bnxt *bp)
{
kfree(bp->test_info);
@@ -4067,10 +4185,12 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
+ .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index b753032a1047..2686a714a59f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -254,6 +254,8 @@ struct cmd_nums {
#define HWRM_PORT_DSC_DUMP 0xd9UL
#define HWRM_PORT_EP_TX_QCFG 0xdaUL
#define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
@@ -379,6 +381,8 @@ struct cmd_nums {
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
#define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
#define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -417,6 +421,8 @@ struct cmd_nums {
#define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
#define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
#define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
#define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
@@ -440,6 +446,25 @@ struct cmd_nums {
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_TF_IF_TBL_SET 0x2feUL
#define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -546,8 +571,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 95
-#define HWRM_VERSION_STR "1.10.2.95"
+#define HWRM_VERSION_RSVD 118
+#define HWRM_VERSION_STR "1.10.2.118"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1657,6 +1682,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1804,7 +1833,20 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
#define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
#define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
- u8 unused_2[3];
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ u8 unused_2[2];
__le32 partition_min_bw;
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
@@ -1876,6 +1918,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ #define FUNC_CFG_REQ_FLAGS_KEY_CTX_ASSETS_TEST 0x80000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2021,12 +2064,26 @@ struct hwrm_func_cfg_input {
__le16 num_tx_key_ctxs;
__le16 num_rx_key_ctxs;
__le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
u8 port_kdnet_mode;
#define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
- u8 unused_0[7];
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ u8 unused_0[6];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2060,10 +2117,9 @@ struct hwrm_func_qstats_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
u8 unused_0[5];
};
@@ -2093,7 +2149,8 @@ struct hwrm_func_qstats_output {
__le64 rx_agg_bytes;
__le64 rx_agg_events;
__le64 rx_agg_aborts;
- u8 unused_0[7];
+ u8 clear_seq;
+ u8 unused_0[6];
u8 valid;
};
@@ -2106,10 +2163,8 @@ struct hwrm_func_qstats_ext_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
u8 unused_0[1];
__le32 enables;
#define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
@@ -2210,6 +2265,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -3155,19 +3211,23 @@ struct hwrm_func_ptp_pin_qcfg_output {
#define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
#define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
u8 pin2_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 pin3_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 unused_0;
u8 valid;
};
@@ -3215,23 +3275,27 @@ struct hwrm_func_ptp_pin_cfg_input {
#define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
#define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
u8 pin2_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 pin3_state;
#define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
#define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
#define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
u8 pin3_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 unused_0[4];
};
@@ -3319,9 +3383,9 @@ struct hwrm_func_ptp_ts_query_output {
__le16 seq_id;
__le16 resp_len;
__le64 pps_event_ts;
- __le64 ptm_res_local_ts;
- __le64 ptm_pmstr_ts;
- __le32 ptm_mstr_prop_dly;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
u8 unused_0[3];
u8 valid;
};
@@ -3417,7 +3481,9 @@ struct hwrm_func_backing_store_cfg_v2_input {
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
__le64 page_dir;
__le32 num_entries;
__le16 entry_size;
@@ -3853,7 +3919,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -4150,6 +4216,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ u8 unused_0[7];
u8 valid;
};
@@ -4422,9 +4491,7 @@ struct hwrm_port_qstats_input {
__le64 resp_addr;
__le16 port_id;
u8 flags;
- #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0[5];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
@@ -4552,9 +4619,7 @@ struct hwrm_port_qstats_ext_input {
__le16 tx_stat_size;
__le16 rx_stat_size;
u8 flags;
- #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0;
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
@@ -4613,9 +4678,7 @@ struct hwrm_port_ecn_qstats_input {
__le16 port_id;
__le16 ecn_stat_buf_size;
u8 flags;
- #define PORT_ECN_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_ECN_QSTATS_REQ_FLAGS_LAST PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0[3];
__le64 ecn_stat_host_addr;
};
@@ -4814,8 +4877,9 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
u8 internal_port_cnt;
u8 valid;
};
@@ -4830,9 +4894,10 @@ struct hwrm_port_phy_i2c_read_input {
__le32 flags;
__le32 enables;
#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
__le16 port_id;
u8 i2c_slave_addr;
- u8 unused_0;
+ u8 bank_number;
__le16 page_number;
__le16 page_offset;
u8 data_length;
@@ -6537,6 +6602,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -6702,6 +6768,53 @@ struct hwrm_vnic_rss_cfg_cmd_err {
u8 unused_0[7];
};
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
__le16 req_type;
@@ -6827,6 +6940,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
#define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
#define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -7626,7 +7740,10 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
@@ -8337,6 +8454,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
u8 unused_0[3];
u8 valid;
};
@@ -8355,7 +8473,9 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI
u8 unused_0[7];
};
@@ -8367,7 +8487,16 @@ struct hwrm_tunnel_dst_port_query_output {
__le16 resp_len;
__le16 tunnel_dst_port_id;
__be16 tunnel_dst_port_val;
- u8 unused_0[3];
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[2];
u8 valid;
};
@@ -8385,7 +8514,9 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -8398,7 +8529,21 @@ struct hwrm_tunnel_dst_port_alloc_output {
__le16 seq_id;
__le16 resp_len;
__le16 tunnel_dst_port_id;
- u8 unused_0[5];
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
u8 valid;
};
@@ -8416,7 +8561,9 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -8428,7 +8575,12 @@ struct hwrm_tunnel_dst_port_free_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_1[7];
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
u8 valid;
};
@@ -8686,9 +8838,7 @@ struct hwrm_stat_generic_qstats_input {
__le64 resp_addr;
__le16 generic_stat_size;
u8 flags;
- #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER 0x0UL
- #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define STAT_GENERIC_QSTATS_REQ_FLAGS_LAST STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0[5];
__le64 generic_stat_host_addr;
};
@@ -10202,6 +10352,7 @@ struct fw_status_reg {
#define FW_STATUS_REG_SHUTDOWN 0x100000UL
#define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
#define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
};
/* hcomm_status (size:64b/8B) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 2132ce63193c..4ec8bba18cdd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -14,6 +14,7 @@
#include <linux/net_tstamp.h>
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -204,24 +205,33 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
return 0;
}
-static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
+static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct hwrm_port_mac_cfg_input *req;
struct bnxt *bp = ptp->bp;
- int rc;
+ int rc = 0;
- rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
- if (rc)
- return rc;
+ if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) {
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
+ spin_unlock_bh(&ptp->ptp_lock);
+ } else {
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
- req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
- req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
- rc = hwrm_req_send(ptp->bp, req);
- if (rc)
- netdev_err(ptp->bp->dev,
- "ptp adjfreq failed. rc = %d\n", rc);
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc)
+ netdev_err(ptp->bp->dev,
+ "ptp adjfine failed. rc = %d\n", rc);
+ }
return rc;
}
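
A note on the two branches above: without BNXT_FW_CAP_PTP_RTC the driver trims the local cyclecounter mult from scaled_ppm, while the RTC path still hands the firmware a plain ppb value. The sketch below illustrates the assumed arithmetic behind the generic adjust_by_scaled_ppm()/scaled_ppm_to_ppb() helpers (hand-written illustration, not the kernel implementations; rounding details differ):

#include <stdio.h>
#include <stdint.h>

/* scaled_ppm is parts per million with a 16-bit binary fraction,
 * so ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13.
 */
static int32_t sketch_scaled_ppm_to_ppb(long scaled_ppm)
{
	return (int32_t)((int64_t)scaled_ppm * 125 / 8192);
}

/* Scale a base multiplier by (1 +/- |scaled_ppm| / (10^6 * 2^16)), the kind
 * of trim the non-RTC branch applies to ptp->cc.mult.
 */
static uint64_t sketch_adjust_by_scaled_ppm(uint64_t base, long scaled_ppm)
{
	int neg = scaled_ppm < 0;
	uint64_t delta;

	if (neg)
		scaled_ppm = -scaled_ppm;
	delta = base * (uint64_t)scaled_ppm / (1000000ULL << 16);
	return neg ? base - delta : base + delta;
}

int main(void)
{
	/* 1 ppm requested: 65536 in scaled_ppm units */
	printf("%d ppb\n", sketch_scaled_ppm_to_ppb(65536));	/* prints 1000 */
	printf("mult %llu -> %llu\n", 1ULL << 23,
	       (unsigned long long)sketch_adjust_by_scaled_ppm(1ULL << 23, 65536));
	return 0;
}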
@@ -749,7 +759,7 @@ static const struct ptp_clock_info bnxt_ptp_caps = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = bnxt_ptp_adjfreq,
+ .adjfine = bnxt_ptp_adjfine,
.adjtime = bnxt_ptp_adjtime,
.do_aux_work = bnxt_ptp_ts_aux_work,
.gettimex64 = bnxt_ptp_gettimex,
@@ -846,8 +856,9 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
memset(&ptp->cc, 0, sizeof(ptp->cc));
ptp->cc.read = bnxt_cc_read;
ptp->cc.mask = CYCLECOUNTER_MASK(48);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
+ ptp->cc.shift = BNXT_CYCLES_SHIFT;
+ ptp->cc.mult = clocksource_khz2mult(BNXT_DEVCLK_FREQ, ptp->cc.shift);
+ ptp->cmult = ptp->cc.mult;
ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
}
if (init_tc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 4ce0a14c1e23..34162e07a119 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -17,6 +17,8 @@
#define BNXT_PTP_GRC_WIN_BASE 0x6000
#define BNXT_MAX_PHC_DRIFT 31000000
+#define BNXT_CYCLES_SHIFT 23
+#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
@@ -88,8 +90,9 @@ struct bnxt_ptp_cfg {
u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
- /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
- #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+ u32 cmult;
+ /* A 23-bit shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
+ #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
u16 tx_seqid;
u16 tx_hdr_off;
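
A quick sanity check of the ~36-minute figure in the new comment, assuming the 1 GHz device clock and 23-bit shift set up in bnxt_ptp_timecounter_init() above (illustration only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* clocksource_khz2mult(1000000, 23) == (10^6 kHz << 23) / 10^6 == 2^23 */
	uint64_t mult = 1ULL << 23;
	/* the cycles * mult product must fit in 64 bits, so at most ~2^41 cycles */
	uint64_t max_cycles = UINT64_MAX / mult;
	double seconds = (double)max_cycles / 1e9;	/* 1 GHz device clock */

	printf("%.0f s (~%.1f min)\n", seconds, seconds / 60.0);	/* ~2199 s, ~36.6 min */
	return 0;
}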
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 25c450606985..a8ce8d0cf9c4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1387,7 +1387,8 @@ static int bcmgenet_validate_flow(struct net_device *dev,
struct ethtool_usrip4_spec *l4_mask;
struct ethhdr *eth_mask;
- if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
+ cmd->fs.location != RX_CLS_LOC_ANY) {
netdev_err(dev, "rxnfc: Invalid location (%d)\n",
cmd->fs.location);
return -EINVAL;
@@ -1452,7 +1453,7 @@ static int bcmgenet_insert_flow(struct net_device *dev,
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *loc_rule;
- int err;
+ int err, i;
if (priv->hw_params->hfb_filter_size < 128) {
netdev_err(dev, "rxnfc: Not supported by this device\n");
@@ -1470,7 +1471,29 @@ static int bcmgenet_insert_flow(struct net_device *dev,
if (err)
return err;
- loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (cmd->fs.location == RX_CLS_LOC_ANY) {
+ list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
+ cmd->fs.location = loc_rule->fs.location;
+ err = memcmp(&loc_rule->fs, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+ if (!err)
+ /* rule exists so return current location */
+ return 0;
+ }
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ loc_rule = &priv->rxnfc_rules[i];
+ if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+ cmd->fs.location = i;
+ break;
+ }
+ }
+ if (i == MAX_NUM_OF_FS_RULES) {
+ cmd->fs.location = RX_CLS_LOC_ANY;
+ return -ENOSPC;
+ }
+ } else {
+ loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ }
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
@@ -1583,7 +1606,7 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bcmgenet_get_num_flows(priv);
- cmd->data = MAX_NUM_OF_FS_RULES;
+ cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRULE:
err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 7ded559842e8..b615176338b2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -72,7 +72,6 @@ static void bcmgenet_mac_config(struct net_device *dev)
* Receive clock is provided by the PHY.
*/
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
@@ -95,10 +94,18 @@ static void bcmgenet_mac_config(struct net_device *dev)
*/
void bcmgenet_mii_setup(struct net_device *dev)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
+ u32 reg;
- if (phydev->link)
+ if (phydev->link) {
bcmgenet_mac_config(dev);
+ } else {
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ }
+
phy_print_status(phydev);
}
@@ -266,18 +273,20 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
/* This is an external PHY (xMII), so we need to enable the RGMII
- * block for the interface to work
+ * block for the interface to work. Unconditionally clear the
+ * Out-of-band disable since we do not need it.
*/
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~ID_MODE_DIS;
reg |= id_mode_dis;
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
reg |= RGMII_MODE_EN_V123;
else
reg |= RGMII_MODE_EN;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4179a12fc881..59debdc344a5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6179,34 +6179,26 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
return 0;
}
-static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
- bool neg_adj = false;
- u32 correction = 0;
-
- if (ppb < 0) {
- neg_adj = true;
- ppb = -ppb;
- }
+ u64 correction;
+ bool neg_adj;
/* Frequency adjustment is performed using hardware with a 24 bit
* accumulator and a programmable correction value. On each clk, the
* correction value gets added to the accumulator and when it
* overflows, the time counter is incremented/decremented.
- *
- * So conversion from ppb to correction value is
- * ppb * (1 << 24) / 1000000000
*/
- correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
- TG3_EAV_REF_CLK_CORRECT_MASK;
+ neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
tg3_full_lock(tp, 0);
if (correction)
tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
TG3_EAV_REF_CLK_CORRECT_EN |
- (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
+ (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
+ ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
else
tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
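
For reference, the correction programmed by the old ppb formula and by the new scaled_ppm helper should match; a small check under the assumption that diff_by_scaled_ppm() computes base * |scaled_ppm| / (10^6 * 2^16) and returns whether the adjustment is negative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long ppb = 1000;			/* a 1 ppm adjustment */
	long scaled_ppm = 1L << 16;		/* the same 1 ppm, in scaled units */
	unsigned long long old_corr = (unsigned long long)ppb * (1 << 24) / 1000000000ULL;
	unsigned long long new_corr = (1ULL << 24) * scaled_ppm / (1000000ULL << 16);

	printf("%llu %llu\n", old_corr, new_corr);	/* both print 16 */
	return 0;
}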
@@ -6330,7 +6322,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.n_per_out = 1,
.n_pins = 0,
.pps = 0,
- .adjfreq = tg3_ptp_adjfreq,
+ .adjfine = tg3_ptp_adjfine,
.adjtime = tg3_ptp_adjtime,
.gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 8f0ac7b99973..858c92129451 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -18,15 +18,43 @@
/* BFA state machine interfaces */
-typedef void (*bfa_sm_t)(void *sm, int event);
-
/* For converting from state machine function to state encoding. */
-struct bfa_sm_table {
- bfa_sm_t sm; /*!< state machine function */
- int state; /*!< state machine encoding */
- char *name; /*!< state name for display */
-};
-#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+#define BFA_SM_TABLE(n, s, e, t) \
+struct s; \
+enum e; \
+typedef void (*t)(struct s *, enum e); \
+ \
+struct n ## _sm_table_s { \
+ t sm; /* state machine function */ \
+ int state; /* state machine encoding */ \
+ char *name; /* state name for display */ \
+}; \
+ \
+static inline int \
+n ## _sm_to_state(struct n ## _sm_table_s *smt, t sm) \
+{ \
+ int i = 0; \
+ \
+ while (smt[i].sm && smt[i].sm != sm) \
+ i++; \
+ return smt[i].state; \
+}
+
+BFA_SM_TABLE(iocpf, bfa_iocpf, iocpf_event, bfa_fsm_iocpf_t)
+BFA_SM_TABLE(ioc, bfa_ioc, ioc_event, bfa_fsm_ioc_t)
+BFA_SM_TABLE(cmdq, bfa_msgq_cmdq, cmdq_event, bfa_fsm_msgq_cmdq_t)
+BFA_SM_TABLE(rspq, bfa_msgq_rspq, rspq_event, bfa_fsm_msgq_rspq_t)
+
+BFA_SM_TABLE(ioceth, bna_ioceth, bna_ioceth_event, bna_fsm_ioceth_t)
+BFA_SM_TABLE(enet, bna_enet, bna_enet_event, bna_fsm_enet_t)
+BFA_SM_TABLE(ethport, bna_ethport, bna_ethport_event, bna_fsm_ethport_t)
+BFA_SM_TABLE(tx, bna_tx, bna_tx_event, bna_fsm_tx_t)
+BFA_SM_TABLE(rxf, bna_rxf, bna_rxf_event, bna_fsm_rxf_t)
+BFA_SM_TABLE(rx, bna_rx, bna_rx_event, bna_fsm_rx_t)
+
+#undef BFA_SM_TABLE
+
+#define BFA_SM(_sm) (_sm)
/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
@@ -41,24 +69,12 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
static void oc ## _sm_ ## st ## _entry(otype * fsm)
#define bfa_fsm_set_state(_fsm, _state) do { \
- (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ (_fsm)->fsm = (_state); \
_state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
-#define bfa_fsm_cmp_state(_fsm, _state) \
- ((_fsm)->fsm == (bfa_fsm_t)(_state))
-
-static inline int
-bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
-{
- int i = 0;
-
- while (smt[i].sm && smt[i].sm != sm)
- i++;
- return smt[i].state;
-}
-
+#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state))
/* Generic wait counter. */
typedef void (*bfa_wc_resume_t) (void *cbarg);
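
Hand-expanding one instantiation may help when reading the users below: BFA_SM_TABLE(ioc, bfa_ioc, ioc_event, bfa_fsm_ioc_t) produces, give or take whitespace, the struct ioc_sm_table_s and ioc_sm_to_state() that bfa_ioc.c relies on:

struct bfa_ioc;
enum ioc_event;
typedef void (*bfa_fsm_ioc_t)(struct bfa_ioc *, enum ioc_event);

struct ioc_sm_table_s {
	bfa_fsm_ioc_t sm;	/* state machine function */
	int state;		/* state machine encoding */
	char *name;		/* state name for display */
};

static inline int
ioc_sm_to_state(struct ioc_sm_table_s *smt, bfa_fsm_ioc_t sm)
{
	int i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}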
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index cd933817a0b8..b07522ac3e74 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -114,7 +114,7 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
-static struct bfa_sm_table ioc_sm_table[] = {
+static struct ioc_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
@@ -183,7 +183,7 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
-static struct bfa_sm_table iocpf_sm_table[] = {
+static struct iocpf_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
@@ -2860,12 +2860,12 @@ static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
enum bfa_iocpf_state iocpf_st;
- enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
switch (iocpf_st) {
case BFA_IOCPF_SEMWAIT:
@@ -2983,7 +2983,7 @@ bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
enum bfa_iocpf_state iocpf_st;
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
if (iocpf_st == BFA_IOCPF_HWINIT)
bfa_ioc_poll_fwinit(ioc);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index edd0ed5b5332..f30d06ec4ffe 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -147,16 +147,20 @@ struct bfa_ioc_notify {
(__notify)->cbarg = (__cbarg); \
} while (0)
+enum iocpf_event;
+
struct bfa_iocpf {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_iocpf *s, enum iocpf_event e);
struct bfa_ioc *ioc;
bool fw_mismatch_notified;
bool auto_recover;
u32 poll_time;
};
+enum ioc_event;
+
struct bfa_ioc {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_ioc *s, enum ioc_event e);
struct bfa *bfa;
struct bfa_pcidev pcidev;
struct timer_list ioc_timer;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index 47125f419530..fa40d5ec6f1c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -202,7 +202,6 @@ static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
size_t len = cmd->msg_size;
- int num_entries = 0;
size_t to_copy;
u8 *src, *dst;
@@ -219,7 +218,6 @@ __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
dst = (u8 *)cmdq->addr.kva;
dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
- num_entries++;
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
index 75343b535798..170a4b4bed96 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
@@ -55,8 +55,10 @@ enum bfa_msgq_cmdq_flags {
BFA_MSGQ_CMDQ_F_DB_UPDATE = 1,
};
+enum cmdq_event;
+
struct bfa_msgq_cmdq {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_msgq_cmdq *s, enum cmdq_event e);
enum bfa_msgq_cmdq_flags flags;
u16 producer_index;
@@ -81,8 +83,10 @@ enum bfa_msgq_rspq_flags {
typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr);
+enum rspq_event;
+
struct bfa_msgq_rspq {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_msgq_rspq *s, enum rspq_event e);
enum bfa_msgq_rspq_flags flags;
u16 producer_index;
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index a2c983f56b00..883de0ac8de4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1257,7 +1257,7 @@ bna_enet_mtu_get(struct bna_enet *enet)
void
bna_enet_enable(struct bna_enet *enet)
{
- if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
+ if (enet->fsm != bna_enet_sm_stopped)
return;
enet->flags |= BNA_ENET_F_ENABLED;
@@ -1751,12 +1751,12 @@ bna_ioceth_uninit(struct bna_ioceth *ioceth)
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
- if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
+ if (ioceth->fsm == bna_ioceth_sm_ready) {
bnad_cb_ioceth_ready(ioceth->bna->bnad);
return;
}
- if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
+ if (ioceth->fsm == bna_ioceth_sm_stopped)
bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 2623a0da4682..c05dc7a1c4a1 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1956,7 +1956,7 @@ static void
bna_rx_stop(struct bna_rx *rx)
{
rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
- if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
+ if (rx->fsm == bna_rx_sm_stopped)
bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
else {
rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
@@ -2535,7 +2535,7 @@ bna_rx_destroy(struct bna_rx *rx)
void
bna_rx_enable(struct bna_rx *rx)
{
- if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ if (rx->fsm != bna_rx_sm_stopped)
return;
rx->rx_flags |= BNA_RX_F_ENABLED;
@@ -3523,7 +3523,7 @@ bna_tx_destroy(struct bna_tx *tx)
void
bna_tx_enable(struct bna_tx *tx)
{
- if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ if (tx->fsm != bna_tx_sm_stopped)
return;
tx->flags |= BNA_TX_F_ENABLED;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index 666b6922e24d..a5ebd7110e07 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -312,8 +312,10 @@ struct bna_attr {
/* IOCEth */
+enum bna_ioceth_event;
+
struct bna_ioceth {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_ioceth *s, enum bna_ioceth_event e);
struct bfa_ioc ioc;
struct bna_attr attr;
@@ -334,8 +336,10 @@ struct bna_pause_config {
enum bna_status rx_pause;
};
+enum bna_enet_event;
+
struct bna_enet {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_enet *s, enum bna_enet_event e);
enum bna_enet_flags flags;
enum bna_enet_type type;
@@ -360,8 +364,10 @@ struct bna_enet {
/* Ethport */
+enum bna_ethport_event;
+
struct bna_ethport {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_ethport *s, enum bna_ethport_event e);
enum bna_ethport_flags flags;
enum bna_link_status link_status;
@@ -454,13 +460,16 @@ struct bna_txq {
};
/* Tx object */
+
+enum bna_tx_event;
+
struct bna_tx {
/* This should be the first one */
struct list_head qe;
int rid;
int hw_id;
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_tx *s, enum bna_tx_event e);
enum bna_tx_flags flags;
enum bna_tx_type type;
@@ -698,8 +707,11 @@ struct bna_rxp {
};
/* RxF structure (hardware Rx Function) */
+
+enum bna_rxf_event;
+
struct bna_rxf {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_rxf *s, enum bna_rxf_event e);
struct bfa_msgq_cmd_entry msgq_cmd;
union {
@@ -769,13 +781,16 @@ struct bna_rxf {
};
/* Rx object */
+
+enum bna_rx_event;
+
struct bna_rx {
/* This should be the first one */
struct list_head qe;
int rid;
int hw_id;
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_rx *s, enum bna_rx_event e);
enum bna_rx_type type;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 4f63f1ba3161..95667b979fab 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -742,7 +742,6 @@ static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
}
static const struct phylink_mac_ops macb_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = macb_mac_select_pcs,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
@@ -2947,6 +2946,18 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int macb_set_mac_addr(struct net_device *dev, void *addr)
+{
+ int err;
+
+ err = eth_mac_addr(dev, addr);
+ if (err < 0)
+ return err;
+
+ macb_set_hwaddr(netdev_priv(dev));
+ return 0;
+}
+
static void gem_update_stats(struct macb *bp)
{
struct macb_queue *queue;
@@ -3786,7 +3797,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = macb_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macb_poll_controller,
#endif
@@ -4049,6 +4060,8 @@ static int macb_init(struct platform_device *pdev)
dev->ethtool_ops = &macb_ethtool_ops;
}
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
/* Set features */
dev->hw_features = NETIF_F_SG;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 98793b2ac2c7..fd7c80edb6e8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1512,14 +1512,17 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * liquidio_ptp_adjfreq - Adjust ptp frequency
+ * liquidio_ptp_adjfine - Adjust ptp frequency
* @ptp: PTP clock info
- * @ppb: how much to adjust by, in parts-per-billion
+ * @scaled_ppm: how much to adjust by, in scaled parts-per-million
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct lio *lio = container_of(ptp, struct lio, ptp_info);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
u64 comp, delta;
unsigned long flags;
bool neg_adj = false;
@@ -1643,7 +1646,7 @@ static void oct_ptp_open(struct net_device *netdev)
lio->ptp_info.n_ext_ts = 0;
lio->ptp_info.n_per_out = 0;
lio->ptp_info.pps = 0;
- lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+ lio->ptp_info.adjfine = liquidio_ptp_adjfine;
lio->ptp_info.adjtime = liquidio_ptp_adjtime;
lio->ptp_info.gettime64 = liquidio_ptp_gettime;
lio->ptp_info.settime64 = liquidio_ptp_settime;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 5bf117d2179f..cbd06d9b95d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -194,17 +194,20 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
}
/**
- * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
+ * cxgb4_ptp_adjfine - Adjust frequency of PHC cycle counter
* @ptp: ptp clock structure
- * @ppb: Desired frequency change in parts per billion
+ * @scaled_ppm: Desired frequency change in scaled parts per million
*
- * Adjust the frequency of the PHC cycle counter by the indicated ppb from
+ * Adjust the frequency of the PHC cycle counter by the indicated amount from
* the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int cxgb4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct adapter *adapter = (struct adapter *)container_of(ptp,
struct adapter, ptp_clock_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
struct fw_ptp_cmd c;
int err;
@@ -404,7 +407,7 @@ static const struct ptp_clock_info cxgb4_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = 0,
.pps = 0,
- .adjfreq = cxgb4_ptp_adjfreq,
+ .adjfine = cxgb4_ptp_adjfine,
.adjtime = cxgb4_ptp_adjtime,
.gettime64 = cxgb4_ptp_gettime,
.settime64 = cxgb4_ptp_settime,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index da9973b711f4..1a5fdd755e9e 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1839,9 +1839,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
*/
if (prior_data_len) {
int i = 0;
- u8 *data = NULL;
skb_frag_t *f;
- u8 *vaddr;
int frag_size = 0, frag_delta = 0;
while (remaining > 0) {
@@ -1853,24 +1851,24 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
i++;
}
f = &record->frags[i];
- vaddr = kmap_atomic(skb_frag_page(f));
-
- data = vaddr + skb_frag_off(f) + remaining;
frag_delta = skb_frag_size(f) - remaining;
if (frag_delta >= prior_data_len) {
- memcpy(prior_data, data, prior_data_len);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ prior_data_len);
} else {
- memcpy(prior_data, data, frag_delta);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ frag_delta);
+
/* get the next page */
f = &record->frags[i + 1];
- vaddr = kmap_atomic(skb_frag_page(f));
- data = vaddr + skb_frag_off(f);
- memcpy(prior_data + frag_delta,
- data, (prior_data_len - frag_delta));
- kunmap_atomic(vaddr);
+
+ memcpy_from_page(prior_data + frag_delta,
+ skb_frag_page(f),
+ skb_frag_off(f),
+ prior_data_len - frag_delta);
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
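
The hunk above replaces the open-coded kmap_atomic()/kunmap_atomic() copies with the highmem helper; its assumed shape is roughly the following (see include/linux/highmem.h for the real definition):

#include <linux/highmem.h>
#include <linux/string.h>

/* Assumed shape only: the mapping and unmapping that used to be open-coded
 * around each memcpy() now lives inside the helper.
 */
static inline void sketch_memcpy_from_page(char *to, struct page *page,
					   size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}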
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index a0964b629ffc..300ad05ee05b 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -226,21 +226,6 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
return enic->rq_count + wq;
}
-static inline unsigned int enic_legacy_io_intr(void)
-{
- return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
- return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
- return 2;
-}
-
static inline unsigned int enic_msix_rq_intr(struct enic *enic,
unsigned int rq)
{
@@ -258,6 +243,10 @@ static inline unsigned int enic_msix_err_intr(struct enic *enic)
return enic->rq_count + enic->wq_count;
}
+#define ENIC_LEGACY_IO_INTR 0
+#define ENIC_LEGACY_ERR_INTR 1
+#define ENIC_LEGACY_NOTIFY_INTR 2
+
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
return enic->rq_count + enic->wq_count + 1;
@@ -267,7 +256,7 @@ static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- return intr == enic_legacy_err_intr();
+ return intr == ENIC_LEGACY_ERR_INTR;
case VNIC_DEV_INTR_MODE_MSIX:
return intr == enic_msix_err_intr(enic);
case VNIC_DEV_INTR_MODE_MSI:
@@ -280,7 +269,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- return intr == enic_legacy_notify_intr();
+ return intr == ENIC_LEGACY_NOTIFY_INTR;
case VNIC_DEV_INTR_MODE_MSIX:
return intr == enic_msix_notify_intr(enic);
case VNIC_DEV_INTR_MODE_MSI:
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 29500d32e362..37bd38d772e8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -448,9 +448,9 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
{
struct net_device *netdev = data;
struct enic *enic = netdev_priv(netdev);
- unsigned int io_intr = enic_legacy_io_intr();
- unsigned int err_intr = enic_legacy_err_intr();
- unsigned int notify_intr = enic_legacy_notify_intr();
+ unsigned int io_intr = ENIC_LEGACY_IO_INTR;
+ unsigned int err_intr = ENIC_LEGACY_ERR_INTR;
+ unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR;
u32 pba;
vnic_intr_mask(&enic->intr[io_intr]);
@@ -1507,7 +1507,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
struct enic *enic = netdev_priv(netdev);
unsigned int cq_rq = enic_cq_rq(enic, 0);
unsigned int cq_wq = enic_cq_wq(enic, 0);
- unsigned int intr = enic_legacy_io_intr();
+ unsigned int intr = ENIC_LEGACY_IO_INTR;
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int work_done, rq_work_done = 0, wq_work_done;
@@ -1856,8 +1856,7 @@ static int enic_dev_notify_set(struct enic *enic)
spin_lock_bh(&enic->devcmd_lock);
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- err = vnic_dev_notify_set(enic->vdev,
- enic_legacy_notify_intr());
+ err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
break;
case VNIC_DEV_INTR_MODE_MSIX:
err = vnic_dev_notify_set(enic->vdev,
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index fdf10318758b..5715b9ab2712 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1919,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
/* Racing with RX NAPI */
do {
- start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
stats->rx_packets = port->stats.rx_packets;
stats->rx_bytes = port->stats.rx_bytes;
@@ -1931,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_crc_errors = port->stats.rx_crc_errors;
stats->rx_frame_errors = port->stats.rx_frame_errors;
- } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
/* Racing with MIB and TX completion interrupts */
do {
- start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
stats->tx_errors = port->stats.tx_errors;
stats->tx_packets = port->stats.tx_packets;
@@ -1945,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = port->stats.rx_missed_errors;
stats->rx_fifo_errors = port->stats.rx_fifo_errors;
- } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
/* Racing with hard_start_xmit */
do {
- start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
stats->tx_dropped = port->stats.tx_dropped;
- } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
stats->rx_dropped += stats->rx_missed_errors;
}
@@ -2031,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
/* Racing with MIB interrupt */
do {
p = values;
- start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
for (i = 0; i < RX_STATS_NUM; i++)
*p++ = port->hw_stats[i];
- } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
values = p;
/* Racing with RX NAPI */
do {
p = values;
- start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
for (i = 0; i < RX_STATUS_NUM; i++)
*p++ = port->rx_stats[i];
@@ -2050,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*p++ = port->rx_csum_stats[i];
*p++ = port->rx_napi_exits;
- } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
values = p;
/* Racing with TX start_xmit */
do {
p = values;
- start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
for (i = 0; i < TX_MAX_FRAGS; i++) {
*values++ = port->tx_frag_stats[i];
@@ -2065,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*values++ = port->tx_frags_linearized;
*values++ = port->tx_hw_csummed;
- } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
}
static int gmac_get_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 2c67a857a42f..db6615aa921b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -814,7 +814,6 @@ rio_free_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = netdev_priv(dev);
int entry = np->old_tx % TX_RING_SIZE;
- int tx_use = 0;
unsigned long flag = 0;
if (irq)
@@ -839,7 +838,6 @@ rio_free_tx (struct net_device *dev, int irq)
np->tx_skbuff[entry] = NULL;
entry = (entry + 1) % TX_RING_SIZE;
- tx_use++;
}
if (irq)
spin_unlock(&np->tx_lock);
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 43def191f26f..aaf0eda96292 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1414,7 +1414,6 @@ static void refill_rx (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int entry;
- int cnt = 0;
/* Refill the Rx ring buffers. */
for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
@@ -1441,7 +1440,6 @@ static void refill_rx (struct net_device *dev)
np->rx_ring[entry].frag.length =
cpu_to_le32(np->rx_buf_sz | LastFrag);
np->rx_ring[entry].status = 0;
- cnt++;
}
}
static void netdev_error(struct net_device *dev, int intr_status)
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 77edc3d9b505..a29de29bdf23 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -389,10 +389,10 @@ static void be_get_ethtool_stats(struct net_device *netdev,
struct be_rx_stats *stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_irq(&stats->sync);
+ start = u64_stats_fetch_begin(&stats->sync);
data[base] = stats->rx_bytes;
data[base + 1] = stats->rx_pkts;
- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
+ } while (u64_stats_fetch_retry(&stats->sync, start));
for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
p = (u8 *)stats + et_rx_stats[i].offset;
@@ -405,19 +405,19 @@ static void be_get_ethtool_stats(struct net_device *netdev,
struct be_tx_stats *stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_irq(&stats->sync_compl);
+ start = u64_stats_fetch_begin(&stats->sync_compl);
data[base] = stats->tx_compl;
- } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
+ } while (u64_stats_fetch_retry(&stats->sync_compl, start));
do {
- start = u64_stats_fetch_begin_irq(&stats->sync);
+ start = u64_stats_fetch_begin(&stats->sync);
for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
p = (u8 *)stats + et_tx_stats[i].offset;
data[base + i] =
(et_tx_stats[i].size == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
+ } while (u64_stats_fetch_retry(&stats->sync, start));
base += ETHTOOL_TXSTATS_NUM;
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a92a74761546..46fe3d74e2e9 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -665,10 +665,10 @@ static void be_get_stats64(struct net_device *netdev,
const struct be_rx_stats *rx_stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->sync);
+ start = u64_stats_fetch_begin(&rx_stats->sync);
pkts = rx_stats(rxo)->rx_pkts;
bytes = rx_stats(rxo)->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
+ } while (u64_stats_fetch_retry(&rx_stats->sync, start));
stats->rx_packets += pkts;
stats->rx_bytes += bytes;
stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -680,10 +680,10 @@ static void be_get_stats64(struct net_device *netdev,
const struct be_tx_stats *tx_stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->sync);
+ start = u64_stats_fetch_begin(&tx_stats->sync);
pkts = tx_stats(txo)->tx_pkts;
bytes = tx_stats(txo)->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
+ } while (u64_stats_fetch_retry(&tx_stats->sync, start));
stats->tx_packets += pkts;
stats->tx_bytes += bytes;
}
@@ -2155,16 +2155,16 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
- start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
+ start = u64_stats_fetch_begin(&rxo->stats.sync);
rx_pkts += rxo->stats.rx_pkts;
- } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+ } while (u64_stats_fetch_retry(&rxo->stats.sync, start));
}
for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
- start = u64_stats_fetch_begin_irq(&txo->stats.sync);
+ start = u64_stats_fetch_begin(&txo->stats.sync);
tx_pkts += txo->stats.tx_reqs;
- } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+ } while (u64_stats_fetch_retry(&txo->stats.sync, start));
}
/* Skip, if wrapped around or first calculation */
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 09a723b827c7..f93ba48bac3f 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -18,6 +18,8 @@
#define TSNEP "tsnep"
#define TSNEP_RING_SIZE 256
+#define TSNEP_RING_RX_REFILL 16
+#define TSNEP_RING_RX_REUSE (TSNEP_RING_SIZE - TSNEP_RING_SIZE / 4)
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
@@ -110,6 +112,7 @@ struct tsnep_rx {
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
struct tsnep_rx_entry entry[TSNEP_RING_SIZE];
+ int write;
int read;
u32 owner_counter;
int increment_owner_counter;
@@ -119,6 +122,7 @@ struct tsnep_rx {
u32 bytes;
u32 dropped;
u32 multicast;
+ u32 alloc_failed;
};
struct tsnep_queue {
@@ -132,6 +136,8 @@ struct tsnep_queue {
int irq;
u32 irq_mask;
+ void __iomem *irq_delay_addr;
+ u8 irq_delay;
};
struct tsnep_adapter {
@@ -223,5 +229,7 @@ static inline void tsnep_ethtool_self_test(struct net_device *dev,
#endif /* CONFIG_TSNEP_SELFTESTS */
void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs);
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue);
#endif /* _TSNEP_H */
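The header now tracks an RX write index and an allocation-failure counter, and defines two watermarks used by the refill rework in tsnep_main.c below. A small sketch of how those watermarks are meant to be read, using only the values defined above (the helper names are illustrative, not part of the driver):

/* With TSNEP_RING_SIZE 256: refill in batches once at least
 * TSNEP_RING_RX_REFILL (16) descriptors are free, and fall back to
 * reusing the buffer of the oldest filled descriptor once
 * TSNEP_RING_RX_REUSE (256 - 256 / 4 = 192) are free, so the ring does
 * not run completely empty under allocation failure. */
static inline bool rx_should_refill(int free_descs)
{
	return free_descs >= 16;
}

static inline bool rx_should_reuse(int free_descs)
{
	return free_descs >= 192;
}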
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index a713a126b227..716815dad7d2 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -8,6 +8,7 @@ static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
"rx_bytes",
"rx_dropped",
"rx_multicast",
+ "rx_alloc_failed",
"rx_phy_errors",
"rx_forwarded_phy_errors",
"rx_invalid_frame_errors",
@@ -21,6 +22,7 @@ struct tsnep_stats {
u64 rx_bytes;
u64 rx_dropped;
u64 rx_multicast;
+ u64 rx_alloc_failed;
u64 rx_phy_errors;
u64 rx_forwarded_phy_errors;
u64 rx_invalid_frame_errors;
@@ -36,6 +38,7 @@ static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
"rx_%d_bytes",
"rx_%d_dropped",
"rx_%d_multicast",
+ "rx_%d_alloc_failed",
"rx_%d_no_descriptor_errors",
"rx_%d_buffer_too_small_errors",
"rx_%d_fifo_overflow_errors",
@@ -47,6 +50,7 @@ struct tsnep_rx_queue_stats {
u64 rx_bytes;
u64 rx_dropped;
u64 rx_multicast;
+ u64 rx_alloc_failed;
u64 rx_no_descriptor_errors;
u64 rx_buffer_too_small_errors;
u64 rx_fifo_overflow_errors;
@@ -178,6 +182,7 @@ static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
tsnep_stats.rx_bytes += adapter->rx[i].bytes;
tsnep_stats.rx_dropped += adapter->rx[i].dropped;
tsnep_stats.rx_multicast += adapter->rx[i].multicast;
+ tsnep_stats.rx_alloc_failed += adapter->rx[i].alloc_failed;
}
reg = ioread32(adapter->addr + ECM_STAT);
tsnep_stats.rx_phy_errors =
@@ -200,6 +205,8 @@ static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
+ tsnep_rx_queue_stats.rx_alloc_failed =
+ adapter->rx[i].alloc_failed;
reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
TSNEP_RX_STATISTIC);
tsnep_rx_queue_stats.rx_no_descriptor_errors =
@@ -250,10 +257,10 @@ static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
-static int tsnep_ethtool_get_rxnfc(struct net_device *dev,
+static int tsnep_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
- struct tsnep_adapter *adapter = netdev_priv(dev);
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
@@ -273,10 +280,10 @@ static int tsnep_ethtool_get_rxnfc(struct net_device *dev,
}
}
-static int tsnep_ethtool_set_rxnfc(struct net_device *dev,
+static int tsnep_ethtool_set_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd)
{
- struct tsnep_adapter *adapter = netdev_priv(dev);
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@@ -288,10 +295,21 @@ static int tsnep_ethtool_set_rxnfc(struct net_device *dev,
}
}
-static int tsnep_ethtool_get_ts_info(struct net_device *dev,
+static void tsnep_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ ch->max_rx = adapter->num_rx_queues;
+ ch->max_tx = adapter->num_tx_queues;
+ ch->rx_count = adapter->num_rx_queues;
+ ch->tx_count = adapter->num_tx_queues;
+}
+
+static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
- struct tsnep_adapter *adapter = netdev_priv(dev);
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
@@ -313,7 +331,137 @@ static int tsnep_ethtool_get_ts_info(struct net_device *dev,
return 0;
}
+static struct tsnep_queue *tsnep_get_queue_with_tx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].tx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static struct tsnep_queue *tsnep_get_queue_with_rx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].rx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static int tsnep_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue;
+
+ queue = tsnep_get_queue_with_rx(adapter, 0);
+ if (queue)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ queue = tsnep_get_queue_with_tx(adapter, 0);
+ if (queue)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+ int retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* RX coalesce has priority for queues with TX and RX */
+ if (adapter->queue[i].rx)
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->rx_coalesce_usecs);
+ else
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_ethtool_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_rx);
+
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_tx);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+ int retval;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_rx, ec->rx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ /* RX coalesce has priority for queues with TX and RX */
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx && !queue_with_tx->rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_tx, ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
const struct ethtool_ops tsnep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = tsnep_ethtool_get_drvinfo,
.get_regs_len = tsnep_ethtool_get_regs_len,
.get_regs = tsnep_ethtool_get_regs,
@@ -327,7 +475,12 @@ const struct ethtool_ops tsnep_ethtool_ops = {
.get_sset_count = tsnep_ethtool_get_sset_count,
.get_rxnfc = tsnep_ethtool_get_rxnfc,
.set_rxnfc = tsnep_ethtool_set_rxnfc,
+ .get_channels = tsnep_ethtool_get_channels,
.get_ts_info = tsnep_ethtool_get_ts_info,
+ .get_coalesce = tsnep_ethtool_get_coalesce,
+ .set_coalesce = tsnep_ethtool_set_coalesce,
+ .get_per_queue_coalesce = tsnep_ethtool_get_per_queue_coalesce,
+ .set_per_queue_coalesce = tsnep_ethtool_set_per_queue_coalesce,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
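The new coalesce callbacks expose one RX and one TX delay per queue; for a combined queue the RX value is applied, which is why set_coalesce and set_per_queue_coalesce skip the TX value on queues that also have RX. A one-line sketch of that policy (illustrative helper, not in the driver):

/* On a combined (TX + RX) queue the RX coalesce value wins. */
static u32 effective_usecs(bool has_rx, u32 rx_usecs, u32 tx_usecs)
{
	return has_rx ? rx_usecs : tx_usecs;
}

The per-queue handlers accept queue indices up to max(num_tx_queues, num_rx_queues), so an index is valid as long as it maps to at least one TX or RX ring.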
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
index 315dada75323..55e1caf193a6 100644
--- a/drivers/net/ethernet/engleder/tsnep_hw.h
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -48,6 +48,13 @@
#define ECM_COUNTER_LOW 0x0028
#define ECM_COUNTER_HIGH 0x002C
+/* interrupt delay */
+#define ECM_INT_DELAY 0x0030
+#define ECM_INT_DELAY_MASK 0xF0
+#define ECM_INT_DELAY_SHIFT 4
+#define ECM_INT_DELAY_BASE_US 16
+#define ECM_INT_DELAY_OFFSET 1
+
/* control and status */
#define ECM_STATUS 0x0080
#define ECM_LINK_MODE_OFF 0x01000000
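ECM_INT_DELAY adds one byte of interrupt-delay control per queue (the per-queue registers sit ECM_INT_DELAY_OFFSET apart); the delay value lives in bits 7:4 in steps of 16 microseconds. An arithmetic sketch of the encoding, derived only from the defines above (the function name is illustrative):

static u8 usecs_to_delay_field(u32 usecs)
{
	u32 steps = usecs / ECM_INT_DELAY_BASE_US;	/* 16 us per step */

	return (steps << ECM_INT_DELAY_SHIFT) & ECM_INT_DELAY_MASK;
}
/* Examples: 64 us -> 0x40; 255 us -> 0xF0 (programs 240 us). The driver's
 * TSNEP_COALESCE_USECS_MAX below works out to 15 * 16 + 16 - 1 = 255 us,
 * i.e. any request in the top 16 us step is accepted and rounded down. */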
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 13d5ff4e0e02..bf0190e1d2ea 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -39,6 +39,10 @@
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
+#define TSNEP_COALESCE_USECS_DEFAULT 64
+#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
+ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
+
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
@@ -83,6 +87,33 @@ static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
return IRQ_HANDLED;
}
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
+{
+ if (usecs > TSNEP_COALESCE_USECS_MAX)
+ return -ERANGE;
+
+ usecs /= ECM_INT_DELAY_BASE_US;
+ usecs <<= ECM_INT_DELAY_SHIFT;
+ usecs &= ECM_INT_DELAY_MASK;
+
+ queue->irq_delay &= ~ECM_INT_DELAY_MASK;
+ queue->irq_delay |= usecs;
+ iowrite8(queue->irq_delay, queue->irq_delay_addr);
+
+ return 0;
+}
+
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
+{
+ u32 usecs;
+
+ usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
+ usecs >>= ECM_INT_DELAY_SHIFT;
+ usecs *= ECM_INT_DELAY_BASE_US;
+
+ return usecs;
+}
+
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
struct tsnep_adapter *adapter = bus->priv;
@@ -629,23 +660,6 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
}
}
-static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
- struct tsnep_rx_entry *entry)
-{
- struct page *page;
-
- page = page_pool_dev_alloc_pages(rx->page_pool);
- if (unlikely(!page))
- return -ENOMEM;
-
- entry->page = page;
- entry->len = TSNEP_MAX_RX_BUF_SIZE;
- entry->dma = page_pool_get_dma_addr(entry->page);
- entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
-
- return 0;
-}
-
static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
struct device *dmadev = rx->adapter->dmadev;
@@ -692,10 +706,6 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
entry = &rx->entry[i];
next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
-
- retval = tsnep_rx_alloc_buffer(rx, entry);
- if (retval)
- goto failed;
}
return 0;
@@ -705,6 +715,45 @@ failed:
return retval;
}
+static int tsnep_rx_desc_available(struct tsnep_rx *rx)
+{
+ if (rx->read <= rx->write)
+ return TSNEP_RING_SIZE - rx->write + rx->read - 1;
+ else
+ return rx->read - rx->write - 1;
+}
+
+static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+ struct page *page)
+{
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
+}
+
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+ tsnep_rx_set_page(rx, entry, page);
+
+ return 0;
+}
+
+static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+ tsnep_rx_set_page(rx, entry, read->page);
+ read->page = NULL;
+}
+
static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
@@ -732,6 +781,48 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
entry->desc->properties = __cpu_to_le32(entry->properties);
}
+static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
+{
+ int index;
+ bool alloc_failed = false;
+ bool enable = false;
+ int i;
+ int retval;
+
+ for (i = 0; i < count && !alloc_failed; i++) {
+ index = (rx->write + i) % TSNEP_RING_SIZE;
+
+ retval = tsnep_rx_alloc_buffer(rx, index);
+ if (unlikely(retval)) {
+ rx->alloc_failed++;
+ alloc_failed = true;
+
+ /* reuse only if no other allocation was successful */
+ if (i == 0 && reuse)
+ tsnep_rx_reuse_buffer(rx, index);
+ else
+ break;
+ }
+
+ tsnep_rx_activate(rx, index);
+
+ enable = true;
+ }
+
+ if (enable) {
+ rx->write = (rx->write + i) % TSNEP_RING_SIZE;
+
+ /* descriptor properties shall be valid before hardware is
+ * notified
+ */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+ }
+
+ return i;
+}
+
static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
int length)
{
@@ -767,23 +858,42 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
+ int desc_available;
int done = 0;
enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
- struct page *page;
struct sk_buff *skb;
int length;
- bool enable = false;
- int retval;
+ desc_available = tsnep_rx_desc_available(rx);
dma_dir = page_pool_get_dma_dir(rx->page_pool);
- while (likely(done < budget)) {
+ while (likely(done < budget) && (rx->read != rx->write)) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_OWNER_COUNTER_MASK) !=
(entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
break;
+ done++;
+
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill(rx, desc_available,
+ reuse);
+ if (!entry->page) {
+ /* buffer has been reused for refill to prevent
+ * empty RX ring, thus buffer cannot be used for
+ * RX processing
+ */
+ rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
/* descriptor properties shall be read first, because valid data
* is signaled there
@@ -795,49 +905,30 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
TSNEP_DESC_LENGTH_MASK;
dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
length, dma_dir);
- page = entry->page;
- /* forward skb only if allocation is successful, otherwise
- * page is reused and frame dropped
- */
- retval = tsnep_rx_alloc_buffer(rx, entry);
- if (!retval) {
- skb = tsnep_build_skb(rx, page, length);
- if (skb) {
- page_pool_release_page(rx->page_pool, page);
-
- rx->packets++;
- rx->bytes += length -
- TSNEP_RX_INLINE_METADATA_SIZE;
- if (skb->pkt_type == PACKET_MULTICAST)
- rx->multicast++;
-
- napi_gro_receive(napi, skb);
- } else {
- page_pool_recycle_direct(rx->page_pool, page);
+ rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ desc_available++;
- rx->dropped++;
- }
- done++;
- } else {
- rx->dropped++;
- }
+ skb = tsnep_build_skb(rx, entry->page, length);
+ if (skb) {
+ page_pool_release_page(rx->page_pool, entry->page);
- tsnep_rx_activate(rx, rx->read);
+ rx->packets++;
+ rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
- enable = true;
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, entry->page);
- rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ rx->dropped++;
+ }
+ entry->page = NULL;
}
- if (enable) {
- /* descriptor properties shall be valid before hardware is
- * notified
- */
- dma_wmb();
-
- iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
- }
+ if (desc_available)
+ tsnep_rx_refill(rx, desc_available, false);
return done;
}
@@ -846,11 +937,13 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
struct tsnep_rx_entry *entry;
- entry = &rx->entry[rx->read];
- if ((__le32_to_cpu(entry->desc_wb->properties) &
- TSNEP_DESC_OWNER_COUNTER_MASK) ==
- (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
- return true;
+ if (rx->read != rx->write) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) ==
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ return true;
+ }
return false;
}
@@ -859,7 +952,6 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
int queue_index, struct tsnep_rx *rx)
{
dma_addr_t dma;
- int i;
int retval;
memset(rx, 0, sizeof(*rx));
@@ -877,13 +969,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
rx->owner_counter = 1;
rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
- for (i = 0; i < TSNEP_RING_SIZE; i++)
- tsnep_rx_activate(rx, i);
-
- /* descriptor properties shall be valid before hardware is notified */
- dma_wmb();
-
- iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+ tsnep_rx_refill(rx, tsnep_rx_desc_available(rx), false);
return 0;
}
@@ -1371,6 +1457,11 @@ static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
adapter->queue[0].tx = &adapter->tx[0];
adapter->queue[0].rx = &adapter->rx[0];
adapter->queue[0].irq_mask = irq_mask;
+ adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[0],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
adapter->netdev->irq = adapter->queue[0].irq;
@@ -1391,6 +1482,12 @@ static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
adapter->queue[i].rx = &adapter->rx[i];
adapter->queue[i].irq_mask =
irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ adapter->queue[i].irq_delay_addr =
+ adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
}
return 0;
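The poll loop now runs the RX ring with separate read and write indices: buffers are allocated in batches during polling, the oldest filled buffer is reused when allocation fails so the ring never goes empty, and failures are counted in alloc_failed. A sketch of the occupancy math behind tsnep_rx_desc_available(), with a worked example (RING_SIZE stands in for TSNEP_RING_SIZE):

#define RING_SIZE 256

/* Free descriptors between the write index (next slot to refill) and the
 * read index (next descriptor to complete); one slot stays unused so a
 * full ring can be told apart from an empty one. */
static int free_descs(int read, int write)
{
	if (read <= write)
		return RING_SIZE - write + read - 1;
	return read - write - 1;
}
/* read == write     -> 255 free (nothing queued to hardware yet)
 * write == read - 1 -> 0 free (ring fully populated) */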
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index d95d78230828..6c8c78018ce6 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -11,6 +11,8 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -27,8 +29,8 @@
#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
-#define MAX_PKT_SIZE 1518
#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
+#define MAX_PKT_SIZE RX_BUF_SIZE /* multi-segment not supported */
#if MAX_PKT_SIZE > 0x7ff
#error invalid MAX_PKT_SIZE
@@ -159,6 +161,7 @@ static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
static int ftmac100_start_hw(struct ftmac100 *priv)
{
struct net_device *netdev = priv->netdev;
+ unsigned int maccr = MACCR_ENABLE_ALL;
if (ftmac100_reset(priv))
return -EIO;
@@ -175,7 +178,11 @@ static int ftmac100_start_hw(struct ftmac100 *priv)
ftmac100_set_mac(priv, netdev->dev_addr);
- iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
+ /* See ftmac100_change_mtu() */
+ if (netdev->mtu > ETH_DATA_LEN)
+ maccr |= FTMAC100_MACCR_RX_FTL;
+
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
return 0;
}
@@ -218,11 +225,6 @@ static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
}
-static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
-}
-
static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
@@ -337,13 +339,7 @@ static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
error = true;
}
- if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx frame too long\n");
-
- netdev->stats.rx_length_errors++;
- error = true;
- } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
+ if (unlikely(ftmac100_rxdes_runt(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx runt\n");
@@ -356,6 +352,11 @@ static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
netdev->stats.rx_length_errors++;
error = true;
}
+ /*
+ * FTMAC100_RXDES0_FTL is not an error, it just indicates that the
+ * frame is longer than 1518 octets. Receiving these is possible when
+ * we told the hardware not to drop them, via FTMAC100_MACCR_RX_FTL.
+ */
return error;
}
@@ -400,12 +401,13 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
return true;
}
- /*
- * It is impossible to get multi-segment packets
- * because we always provide big enough receive buffers.
- */
+ /* We don't support multi-segment packets for now, so drop them. */
ret = ftmac100_rxdes_last_segment(rxdes);
- BUG_ON(!ret);
+ if (unlikely(!ret)) {
+ netdev->stats.rx_length_errors++;
+ ftmac100_rx_drop_packet(priv);
+ return true;
+ }
/* start processing */
skb = netdev_alloc_skb_ip_align(netdev, 128);
@@ -1037,6 +1039,28 @@ static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int c
return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
}
+static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int maccr;
+
+ maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+ if (mtu > ETH_DATA_LEN) {
+ /* process long packets in the driver */
+ maccr |= FTMAC100_MACCR_RX_FTL;
+ } else {
+ /* Let the controller drop incoming packets greater
+ * than 1518 (that is 1500 + 14 Ethernet + 4 FCS).
+ */
+ maccr &= ~FTMAC100_MACCR_RX_FTL;
+ }
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
+
+ netdev->mtu = mtu;
+
+ return 0;
+}
+
static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_open = ftmac100_open,
.ndo_stop = ftmac100_stop,
@@ -1044,6 +1068,7 @@ static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = ftmac100_do_ioctl,
+ .ndo_change_mtu = ftmac100_change_mtu,
};
/******************************************************************************
@@ -1075,7 +1100,7 @@ static int ftmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
- netdev->max_mtu = MAX_PKT_SIZE;
+ netdev->max_mtu = MAX_PKT_SIZE - VLAN_ETH_HLEN;
err = platform_get_ethdev_address(&pdev->dev, netdev);
if (err == -EPROBE_DEFER)
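With a fixed 2044-byte RX buffer, ftmac100 can now accept frames above the default 1518-octet limit: any MTU above ETH_DATA_LEN sets FTMAC100_MACCR_RX_FTL so the MAC stops dropping long frames, multi-segment receives are dropped in software instead of hitting a BUG_ON, and max_mtu is derived from the buffer size. The numbers, spelled out as a quick check (values taken from the defines in this file):

/* RX_BUF_SIZE      = 2044  (single RX buffer, multi-segment unsupported)
 * VLAN_ETH_HLEN    = 14 + 4 = 18
 * netdev->max_mtu  = 2044 - 18 = 2026
 * so a maximally sized VLAN-tagged frame (2026 + 18 = 2044 octets of
 * header plus payload) matches the single RX buffer. */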
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
deleted file mode 100644
index ed18450fd2cc..000000000000
--- a/drivers/net/ethernet/fealnx.c
+++ /dev/null
@@ -1,1953 +0,0 @@
-/*
- Written 1998-2000 by Donald Becker.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License (GPL), incorporated herein by reference.
- Drivers based on or derived from this code fall under the GPL and must
- retain the authorship, copyright and license notice. This file is not
- a complete program and may only be used when the entire operating
- system is licensed under the GPL.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Support information and updates available at
- http://www.scyld.com/network/pci-skeleton.html
-
- Linux kernel updates:
-
- Version 2.51, Nov 17, 2001 (jgarzik):
- - Add ethtool support
- - Replace some MII-related magic numbers with constants
-
-*/
-
-#define DRV_NAME "fealnx"
-
-static int debug; /* 1-> print debug message */
-static int max_interrupt_work = 20;
-
-/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
-static int multicast_filter_limit = 32;
-
-/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
-/* Setting to > 1518 effectively disables this feature. */
-static int rx_copybreak;
-
-/* Used to pass the media type, etc. */
-/* Both 'options[]' and 'full_duplex[]' should exist for driver */
-/* interoperability. */
-/* The media type is usually passed in 'options[]'. */
-#define MAX_UNITS 8 /* More are supported, limit only on options */
-static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
-static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
-
-/* Operational parameters that are set at compile time. */
-/* Keep the ring sizes a power of two for compile efficiency. */
-/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
-/* Making the Tx ring too large decreases the effectiveness of channel */
-/* bonding and packet priority. */
-/* There are no ill effects from too-large receive rings. */
-// 88-12-9 modify,
-// #define TX_RING_SIZE 16
-// #define RX_RING_SIZE 32
-#define TX_RING_SIZE 6
-#define RX_RING_SIZE 12
-#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
-#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct fealnx_desc)
-
-/* Operational parameters that usually are not changed. */
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (2*HZ)
-
-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
-
-
-/* Include files, designed to support most kernel versions 2.0.0 and later. */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-
-/* This driver was written to use PCI memory space, however some x86 systems
- work only with I/O space accesses. */
-#ifndef __alpha__
-#define USE_IO_OPS
-#endif
-
-/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
-/* This is only in the support-all-kernels source code. */
-
-#define RUN_AT(x) (jiffies + (x))
-
-MODULE_AUTHOR("Myson or whoever");
-MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
-MODULE_LICENSE("GPL");
-module_param(max_interrupt_work, int, 0);
-module_param(debug, int, 0);
-module_param(rx_copybreak, int, 0);
-module_param(multicast_filter_limit, int, 0);
-module_param_array(options, int, NULL, 0);
-module_param_array(full_duplex, int, NULL, 0);
-MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
-MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
-MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
-MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
-MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
-MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
-
-enum {
- MIN_REGION_SIZE = 136,
-};
-
-/* A chip capabilities table, matching the entries in pci_tbl[] above. */
-enum chip_capability_flags {
- HAS_MII_XCVR,
- HAS_CHIP_XCVR,
-};
-
-/* 89/6/13 add, */
-/* for different PHY */
-enum phy_type_flags {
- MysonPHY = 1,
- AhdocPHY = 2,
- SeeqPHY = 3,
- MarvellPHY = 4,
- Myson981 = 5,
- LevelOnePHY = 6,
- OtherPHY = 10,
-};
-
-struct chip_info {
- char *chip_name;
- int flags;
-};
-
-static const struct chip_info skel_netdrv_tbl[] = {
- { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
- { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
- { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
-};
-
-/* Offsets to the Command and Status Registers. */
-enum fealnx_offsets {
- PAR0 = 0x0, /* physical address 0-3 */
- PAR1 = 0x04, /* physical address 4-5 */
- MAR0 = 0x08, /* multicast address 0-3 */
- MAR1 = 0x0C, /* multicast address 4-7 */
- FAR0 = 0x10, /* flow-control address 0-3 */
- FAR1 = 0x14, /* flow-control address 4-5 */
- TCRRCR = 0x18, /* receive & transmit configuration */
- BCR = 0x1C, /* bus command */
- TXPDR = 0x20, /* transmit polling demand */
- RXPDR = 0x24, /* receive polling demand */
- RXCWP = 0x28, /* receive current word pointer */
- TXLBA = 0x2C, /* transmit list base address */
- RXLBA = 0x30, /* receive list base address */
- ISR = 0x34, /* interrupt status */
- IMR = 0x38, /* interrupt mask */
- FTH = 0x3C, /* flow control high/low threshold */
- MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */
- TALLY = 0x44, /* tally counters for crc and mpa */
- TSR = 0x48, /* tally counter for transmit status */
- BMCRSR = 0x4c, /* basic mode control and status */
- PHYIDENTIFIER = 0x50, /* phy identifier */
- ANARANLPAR = 0x54, /* auto-negotiation advertisement and link
- partner ability */
- ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */
- BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */
-};
-
-/* Bits in the interrupt status/enable registers. */
-/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
-enum intr_status_bits {
- RFCON = 0x00020000, /* receive flow control xon packet */
- RFCOFF = 0x00010000, /* receive flow control xoff packet */
- LSCStatus = 0x00008000, /* link status change */
- ANCStatus = 0x00004000, /* autonegotiation completed */
- FBE = 0x00002000, /* fatal bus error */
- FBEMask = 0x00001800, /* mask bit12-11 */
- ParityErr = 0x00000000, /* parity error */
- TargetErr = 0x00001000, /* target abort */
- MasterErr = 0x00000800, /* master error */
- TUNF = 0x00000400, /* transmit underflow */
- ROVF = 0x00000200, /* receive overflow */
- ETI = 0x00000100, /* transmit early int */
- ERI = 0x00000080, /* receive early int */
- CNTOVF = 0x00000040, /* counter overflow */
- RBU = 0x00000020, /* receive buffer unavailable */
- TBU = 0x00000010, /* transmit buffer unavailable */
- TI = 0x00000008, /* transmit interrupt */
- RI = 0x00000004, /* receive interrupt */
- RxErr = 0x00000002, /* receive error */
-};
-
-/* Bits in the NetworkConfig register, W for writing, R for reading */
-/* FIXME: some names are invented by me. Marked with (name?) */
-/* If you have docs and know bit names, please fix 'em */
-enum rx_mode_bits {
- CR_W_ENH = 0x02000000, /* enhanced mode (name?) */
- CR_W_FD = 0x00100000, /* full duplex */
- CR_W_PS10 = 0x00080000, /* 10 mbit */
- CR_W_TXEN = 0x00040000, /* tx enable (name?) */
- CR_W_PS1000 = 0x00010000, /* 1000 mbit */
- /* CR_W_RXBURSTMASK= 0x00000e00, Im unsure about this */
- CR_W_RXMODEMASK = 0x000000e0,
- CR_W_PROM = 0x00000080, /* promiscuous mode */
- CR_W_AB = 0x00000040, /* accept broadcast */
- CR_W_AM = 0x00000020, /* accept multicast */
- CR_W_ARP = 0x00000008, /* receive runt pkt */
- CR_W_ALP = 0x00000004, /* receive long pkt */
- CR_W_SEP = 0x00000002, /* receive error pkt */
- CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */
-
- CR_R_TXSTOP = 0x04000000, /* tx stopped (name?) */
- CR_R_FD = 0x00100000, /* full duplex detected */
- CR_R_PS10 = 0x00080000, /* 10 mbit detected */
- CR_R_RXSTOP = 0x00008000, /* rx stopped (name?) */
-};
-
-/* The Tulip Rx and Tx buffer descriptors. */
-struct fealnx_desc {
- s32 status;
- s32 control;
- u32 buffer;
- u32 next_desc;
- struct fealnx_desc *next_desc_logical;
- struct sk_buff *skbuff;
- u32 reserved1;
- u32 reserved2;
-};
-
-/* Bits in network_desc.status */
-enum rx_desc_status_bits {
- RXOWN = 0x80000000, /* own bit */
- FLNGMASK = 0x0fff0000, /* frame length */
- FLNGShift = 16,
- MARSTATUS = 0x00004000, /* multicast address received */
- BARSTATUS = 0x00002000, /* broadcast address received */
- PHYSTATUS = 0x00001000, /* physical address received */
- RXFSD = 0x00000800, /* first descriptor */
- RXLSD = 0x00000400, /* last descriptor */
- ErrorSummary = 0x80, /* error summary */
- RUNTPKT = 0x40, /* runt packet received */
- LONGPKT = 0x20, /* long packet received */
- FAE = 0x10, /* frame align error */
- CRC = 0x08, /* crc error */
- RXER = 0x04, /* receive error */
-};
-
-enum rx_desc_control_bits {
- RXIC = 0x00800000, /* interrupt control */
- RBSShift = 0,
-};
-
-enum tx_desc_status_bits {
- TXOWN = 0x80000000, /* own bit */
- JABTO = 0x00004000, /* jabber timeout */
- CSL = 0x00002000, /* carrier sense lost */
- LC = 0x00001000, /* late collision */
- EC = 0x00000800, /* excessive collision */
- UDF = 0x00000400, /* fifo underflow */
- DFR = 0x00000200, /* deferred */
- HF = 0x00000100, /* heartbeat fail */
- NCRMask = 0x000000ff, /* collision retry count */
- NCRShift = 0,
-};
-
-enum tx_desc_control_bits {
- TXIC = 0x80000000, /* interrupt control */
- ETIControl = 0x40000000, /* early transmit interrupt */
- TXLD = 0x20000000, /* last descriptor */
- TXFD = 0x10000000, /* first descriptor */
- CRCEnable = 0x08000000, /* crc control */
- PADEnable = 0x04000000, /* padding control */
- RetryTxLC = 0x02000000, /* retry late collision */
- PKTSMask = 0x3ff800, /* packet size bit21-11 */
- PKTSShift = 11,
- TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */
- TBSShift = 0,
-};
-
-/* BootROM/EEPROM/MII Management Register */
-#define MASK_MIIR_MII_READ 0x00000000
-#define MASK_MIIR_MII_WRITE 0x00000008
-#define MASK_MIIR_MII_MDO 0x00000004
-#define MASK_MIIR_MII_MDI 0x00000002
-#define MASK_MIIR_MII_MDC 0x00000001
-
-/* ST+OP+PHYAD+REGAD+TA */
-#define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
-#define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
-
-/* ------------------------------------------------------------------------- */
-/* Constants for Myson PHY */
-/* ------------------------------------------------------------------------- */
-#define MysonPHYID 0xd0000302
-/* 89-7-27 add, (begin) */
-#define MysonPHYID0 0x0302
-#define StatusRegister 18
-#define SPEED100 0x0400 // bit10
-#define FULLMODE 0x0800 // bit11
-/* 89-7-27 add, (end) */
-
-/* ------------------------------------------------------------------------- */
-/* Constants for Seeq 80225 PHY */
-/* ------------------------------------------------------------------------- */
-#define SeeqPHYID0 0x0016
-
-#define MIIRegister18 18
-#define SPD_DET_100 0x80
-#define DPLX_DET_FULL 0x40
-
-/* ------------------------------------------------------------------------- */
-/* Constants for Ahdoc 101 PHY */
-/* ------------------------------------------------------------------------- */
-#define AhdocPHYID0 0x0022
-
-#define DiagnosticReg 18
-#define DPLX_FULL 0x0800
-#define Speed_100 0x0400
-
-/* 89/6/13 add, */
-/* -------------------------------------------------------------------------- */
-/* Constants */
-/* -------------------------------------------------------------------------- */
-#define MarvellPHYID0 0x0141
-#define LevelOnePHYID0 0x0013
-
-#define MII1000BaseTControlReg 9
-#define MII1000BaseTStatusReg 10
-#define SpecificReg 17
-
-/* for 1000BaseT Control Register */
-#define PHYAbletoPerform1000FullDuplex 0x0200
-#define PHYAbletoPerform1000HalfDuplex 0x0100
-#define PHY1000AbilityMask 0x300
-
-// for phy specific status register, marvell phy.
-#define SpeedMask 0x0c000
-#define Speed_1000M 0x08000
-#define Speed_100M 0x4000
-#define Speed_10M 0
-#define Full_Duplex 0x2000
-
-// 89/12/29 add, for phy specific status register, levelone phy, (begin)
-#define LXT1000_100M 0x08000
-#define LXT1000_1000M 0x0c000
-#define LXT1000_Full 0x200
-// 89/12/29 add, for phy specific status register, levelone phy, (end)
-
-/* for 3-in-1 case, BMCRSR register */
-#define LinkIsUp2 0x00040000
-
-/* for PHY */
-#define LinkIsUp 0x0004
-
-
-struct netdev_private {
- /* Descriptor rings first for alignment. */
- struct fealnx_desc *rx_ring;
- struct fealnx_desc *tx_ring;
-
- dma_addr_t rx_ring_dma;
- dma_addr_t tx_ring_dma;
-
- spinlock_t lock;
-
- /* Media monitoring timer. */
- struct timer_list timer;
-
- /* Reset timer */
- struct timer_list reset_timer;
- int reset_timer_armed;
- unsigned long crvalue_sv;
- unsigned long imrvalue_sv;
-
- /* Frequently used values: keep some adjacent for cache effect. */
- int flags;
- struct pci_dev *pci_dev;
- unsigned long crvalue;
- unsigned long bcrvalue;
- unsigned long imrvalue;
- struct fealnx_desc *cur_rx;
- struct fealnx_desc *lack_rxbuf;
- int really_rx_count;
- struct fealnx_desc *cur_tx;
- struct fealnx_desc *cur_tx_copy;
- int really_tx_count;
- int free_tx_count;
- unsigned int rx_buf_sz; /* Based on MTU+slack. */
-
- /* These values are keep track of the transceiver/media in use. */
- unsigned int linkok;
- unsigned int line_speed;
- unsigned int duplexmode;
- unsigned int default_port:4; /* Last dev->if_port value. */
- unsigned int PHYType;
-
- /* MII transceiver section. */
- int mii_cnt; /* MII device addresses. */
- unsigned char phys[2]; /* MII device addresses. */
- struct mii_if_info mii;
- void __iomem *mem;
-};
-
-
-static int mdio_read(struct net_device *dev, int phy_id, int location);
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
-static int netdev_open(struct net_device *dev);
-static void getlinktype(struct net_device *dev);
-static void getlinkstatus(struct net_device *dev);
-static void netdev_timer(struct timer_list *t);
-static void reset_timer(struct timer_list *t);
-static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void init_ring(struct net_device *dev);
-static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t intr_handler(int irq, void *dev_instance);
-static int netdev_rx(struct net_device *dev);
-static void set_rx_mode(struct net_device *dev);
-static void __set_rx_mode(struct net_device *dev);
-static struct net_device_stats *get_stats(struct net_device *dev);
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops netdev_ethtool_ops;
-static int netdev_close(struct net_device *dev);
-static void reset_rx_descriptors(struct net_device *dev);
-static void reset_tx_descriptors(struct net_device *dev);
-
-static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
-{
- int delay = 0x1000;
- iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
- while (--delay) {
- if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
- break;
- }
-}
-
-
-static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
-{
- int delay = 0x1000;
- iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
- while (--delay) {
- if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
- == (CR_R_RXSTOP+CR_R_TXSTOP) )
- break;
- }
-}
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = netdev_open,
- .ndo_stop = netdev_close,
- .ndo_start_xmit = start_tx,
- .ndo_get_stats = get_stats,
- .ndo_set_rx_mode = set_rx_mode,
- .ndo_eth_ioctl = mii_ioctl,
- .ndo_tx_timeout = fealnx_tx_timeout,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int fealnx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct netdev_private *np;
- int i, option, err, irq;
- static int card_idx = -1;
- char boardname[12];
- void __iomem *ioaddr;
- unsigned long len;
- unsigned int chip_id = ent->driver_data;
- struct net_device *dev;
- void *ring_space;
- dma_addr_t ring_dma;
- u8 addr[ETH_ALEN];
-#ifdef USE_IO_OPS
- int bar = 0;
-#else
- int bar = 1;
-#endif
-
- card_idx++;
- sprintf(boardname, "fealnx%d", card_idx);
-
- option = card_idx < MAX_UNITS ? options[card_idx] : 0;
-
- i = pci_enable_device(pdev);
- if (i) return i;
- pci_set_master(pdev);
-
- len = pci_resource_len(pdev, bar);
- if (len < MIN_REGION_SIZE) {
- dev_err(&pdev->dev,
- "region size %ld too small, aborting\n", len);
- return -ENODEV;
- }
-
- i = pci_request_regions(pdev, boardname);
- if (i)
- return i;
-
- irq = pdev->irq;
-
- ioaddr = pci_iomap(pdev, bar, len);
- if (!ioaddr) {
- err = -ENOMEM;
- goto err_out_res;
- }
-
- dev = alloc_etherdev(sizeof(struct netdev_private));
- if (!dev) {
- err = -ENOMEM;
- goto err_out_unmap;
- }
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- /* read ethernet id */
- for (i = 0; i < 6; ++i)
- addr[i] = ioread8(ioaddr + PAR0 + i);
- eth_hw_addr_set(dev, addr);
-
- /* Reset the chip to erase previous misconfiguration. */
- iowrite32(0x00000001, ioaddr + BCR);
-
- /* Make certain the descriptor lists are aligned. */
- np = netdev_priv(dev);
- np->mem = ioaddr;
- spin_lock_init(&np->lock);
- np->pci_dev = pdev;
- np->flags = skel_netdrv_tbl[chip_id].flags;
- pci_set_drvdata(pdev, dev);
- np->mii.dev = dev;
- np->mii.mdio_read = mdio_read;
- np->mii.mdio_write = mdio_write;
- np->mii.phy_id_mask = 0x1f;
- np->mii.reg_num_mask = 0x1f;
-
- ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
- GFP_KERNEL);
- if (!ring_space) {
- err = -ENOMEM;
- goto err_out_free_dev;
- }
- np->rx_ring = ring_space;
- np->rx_ring_dma = ring_dma;
-
- ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
- GFP_KERNEL);
- if (!ring_space) {
- err = -ENOMEM;
- goto err_out_free_rx;
- }
- np->tx_ring = ring_space;
- np->tx_ring_dma = ring_dma;
-
- /* find the connected MII xcvrs */
- if (np->flags == HAS_MII_XCVR) {
- int phy, phy_idx = 0;
-
- for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
- phy++) {
- int mii_status = mdio_read(dev, phy, 1);
-
- if (mii_status != 0xffff && mii_status != 0x0000) {
- np->phys[phy_idx++] = phy;
- dev_info(&pdev->dev,
- "MII PHY found at address %d, status "
- "0x%4.4x.\n", phy, mii_status);
- /* get phy type */
- {
- unsigned int data;
-
- data = mdio_read(dev, np->phys[0], 2);
- if (data == SeeqPHYID0)
- np->PHYType = SeeqPHY;
- else if (data == AhdocPHYID0)
- np->PHYType = AhdocPHY;
- else if (data == MarvellPHYID0)
- np->PHYType = MarvellPHY;
- else if (data == MysonPHYID0)
- np->PHYType = Myson981;
- else if (data == LevelOnePHYID0)
- np->PHYType = LevelOnePHY;
- else
- np->PHYType = OtherPHY;
- }
- }
- }
-
- np->mii_cnt = phy_idx;
- if (phy_idx == 0)
- dev_warn(&pdev->dev,
- "MII PHY not found -- this device may "
- "not operate correctly.\n");
- } else {
- np->phys[0] = 32;
-/* 89/6/23 add, (begin) */
- /* get phy type */
- if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
- np->PHYType = MysonPHY;
- else
- np->PHYType = OtherPHY;
- }
- np->mii.phy_id = np->phys[0];
-
- if (dev->mem_start)
- option = dev->mem_start;
-
- /* The lower four bits are the media type. */
- if (option > 0) {
- if (option & 0x200)
- np->mii.full_duplex = 1;
- np->default_port = option & 15;
- }
-
- if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
- np->mii.full_duplex = full_duplex[card_idx];
-
- if (np->mii.full_duplex) {
- dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
-/* 89/6/13 add, (begin) */
-// if (np->PHYType==MarvellPHY)
- if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
- unsigned int data;
-
- data = mdio_read(dev, np->phys[0], 9);
- data = (data & 0xfcff) | 0x0200;
- mdio_write(dev, np->phys[0], 9, data);
- }
-/* 89/6/13 add, (end) */
- if (np->flags == HAS_MII_XCVR)
- mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
- else
- iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
- np->mii.force_media = 1;
- }
-
- dev->netdev_ops = &netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- err = register_netdev(dev);
- if (err)
- goto err_out_free_tx;
-
- printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
- dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
- dev->dev_addr, irq);
-
- return 0;
-
-err_out_free_tx:
- dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
- np->tx_ring_dma);
-err_out_free_rx:
- dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
- np->rx_ring_dma);
-err_out_free_dev:
- free_netdev(dev);
-err_out_unmap:
- pci_iounmap(pdev, ioaddr);
-err_out_res:
- pci_release_regions(pdev);
- return err;
-}
-
-
-static void fealnx_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- struct netdev_private *np = netdev_priv(dev);
-
- dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
- np->tx_ring_dma);
- dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
- np->rx_ring_dma);
- unregister_netdev(dev);
- pci_iounmap(pdev, np->mem);
- free_netdev(dev);
- pci_release_regions(pdev);
- } else
- printk(KERN_ERR "fealnx: remove for unknown device\n");
-}
-
-
-static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
-{
- ulong miir;
- int i;
- unsigned int mask, data;
-
- /* enable MII output */
- miir = (ulong) ioread32(miiport);
- miir &= 0xfffffff0;
-
- miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
-
- /* send 32 1's preamble */
- for (i = 0; i < 32; i++) {
- /* low MDC; MDO is already high (miir) */
- miir &= ~MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
-
- /* high MDC */
- miir |= MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
- }
-
- /* calculate ST+OP+PHYAD+REGAD+TA */
- data = opcode | (phyad << 7) | (regad << 2);
-
- /* sent out */
- mask = 0x8000;
- while (mask) {
- /* low MDC, prepare MDO */
- miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
- if (mask & data)
- miir |= MASK_MIIR_MII_MDO;
-
- iowrite32(miir, miiport);
- /* high MDC */
- miir |= MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
- udelay(30);
-
- /* next */
- mask >>= 1;
- if (mask == 0x2 && opcode == OP_READ)
- miir &= ~MASK_MIIR_MII_WRITE;
- }
- return miir;
-}
-
-
-static int mdio_read(struct net_device *dev, int phyad, int regad)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *miiport = np->mem + MANAGEMENT;
- ulong miir;
- unsigned int mask, data;
-
- miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
-
- /* read data */
- mask = 0x8000;
- data = 0;
- while (mask) {
- /* low MDC */
- miir &= ~MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
-
- /* read MDI */
- miir = ioread32(miiport);
- if (miir & MASK_MIIR_MII_MDI)
- data |= mask;
-
- /* high MDC, and wait */
- miir |= MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
- udelay(30);
-
- /* next */
- mask >>= 1;
- }
-
- /* low MDC */
- miir &= ~MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
-
- return data & 0xffff;
-}
-
-
-static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *miiport = np->mem + MANAGEMENT;
- ulong miir;
- unsigned int mask;
-
- miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
-
- /* write data */
- mask = 0x8000;
- while (mask) {
- /* low MDC, prepare MDO */
- miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
- if (mask & data)
- miir |= MASK_MIIR_MII_MDO;
- iowrite32(miir, miiport);
-
- /* high MDC */
- miir |= MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
-
- /* next */
- mask >>= 1;
- }
-
- /* low MDC */
- miir &= ~MASK_MIIR_MII_MDC;
- iowrite32(miir, miiport);
-}
-
-
-static int netdev_open(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
- const int irq = np->pci_dev->irq;
- int rc, i;
-
- iowrite32(0x00000001, ioaddr + BCR); /* Reset */
-
- rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
- if (rc)
- return -EAGAIN;
-
- for (i = 0; i < 3; i++)
- iowrite16(((const unsigned short *)dev->dev_addr)[i],
- ioaddr + PAR0 + i*2);
-
- init_ring(dev);
-
- iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
- iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
-
- /* Initialize other registers. */
- /* Configure the PCI bus bursts and FIFO thresholds.
- 486: Set 8 longword burst.
- 586: no burst limit.
- Burst length 5:3
- 0 0 0 1
- 0 0 1 4
- 0 1 0 8
- 0 1 1 16
- 1 0 0 32
- 1 0 1 64
- 1 1 0 128
- 1 1 1 256
- Wait the specified 50 PCI cycles after a reset by initializing
- Tx and Rx queues and the address filter list.
- FIXME (Ueimor): optimistic for alpha + posted writes ? */
-
- np->bcrvalue = 0x10; /* little-endian, 8 burst length */
-#ifdef __BIG_ENDIAN
- np->bcrvalue |= 0x04; /* big-endian */
-#endif
-
-#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
- if (boot_cpu_data.x86 <= 4)
- np->crvalue = 0xa00;
- else
-#endif
- np->crvalue = 0xe00; /* rx 128 burst length */
-
-
-// 89/12/29 add,
-// 90/1/16 modify,
-// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
- np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
- if (np->pci_dev->device == 0x891) {
- np->bcrvalue |= 0x200; /* set PROG bit */
- np->crvalue |= CR_W_ENH; /* set enhanced bit */
- np->imrvalue |= ETI;
- }
- iowrite32(np->bcrvalue, ioaddr + BCR);
-
- if (dev->if_port == 0)
- dev->if_port = np->default_port;
-
- iowrite32(0, ioaddr + RXPDR);
-// 89/9/1 modify,
-// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
- np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
- np->mii.full_duplex = np->mii.force_media;
- getlinkstatus(dev);
- if (np->linkok)
- getlinktype(dev);
- __set_rx_mode(dev);
-
- netif_start_queue(dev);
-
- /* Clear and Enable interrupts by setting the interrupt mask. */
- iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
- iowrite32(np->imrvalue, ioaddr + IMR);
-
- if (debug)
- printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
-
- /* Set the timer to check for link beat. */
- timer_setup(&np->timer, netdev_timer, 0);
- np->timer.expires = RUN_AT(3 * HZ);
-
- /* timer handler */
- add_timer(&np->timer);
-
- timer_setup(&np->reset_timer, reset_timer, 0);
- np->reset_timer_armed = 0;
- return rc;
-}
-
-
-static void getlinkstatus(struct net_device *dev)
-/* function: Routine will read MII Status Register to get link status. */
-/* input : dev... pointer to the adapter block. */
-/* output : none. */
-{
- struct netdev_private *np = netdev_priv(dev);
- unsigned int i, DelayTime = 0x1000;
-
- np->linkok = 0;
-
- if (np->PHYType == MysonPHY) {
- for (i = 0; i < DelayTime; ++i) {
- if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
- np->linkok = 1;
- return;
- }
- udelay(100);
- }
- } else {
- for (i = 0; i < DelayTime; ++i) {
- if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
- np->linkok = 1;
- return;
- }
- udelay(100);
- }
- }
-}
-
-
-static void getlinktype(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
-
- if (np->PHYType == MysonPHY) { /* 3-in-1 case */
- if (ioread32(np->mem + TCRRCR) & CR_R_FD)
- np->duplexmode = 2; /* full duplex */
- else
- np->duplexmode = 1; /* half duplex */
- if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
- np->line_speed = 1; /* 10M */
- else
- np->line_speed = 2; /* 100M */
- } else {
- if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
- unsigned int data;
-
- data = mdio_read(dev, np->phys[0], MIIRegister18);
- if (data & SPD_DET_100)
- np->line_speed = 2; /* 100M */
- else
- np->line_speed = 1; /* 10M */
- if (data & DPLX_DET_FULL)
- np->duplexmode = 2; /* full duplex mode */
- else
- np->duplexmode = 1; /* half duplex mode */
- } else if (np->PHYType == AhdocPHY) {
- unsigned int data;
-
- data = mdio_read(dev, np->phys[0], DiagnosticReg);
- if (data & Speed_100)
- np->line_speed = 2; /* 100M */
- else
- np->line_speed = 1; /* 10M */
- if (data & DPLX_FULL)
- np->duplexmode = 2; /* full duplex mode */
- else
- np->duplexmode = 1; /* half duplex mode */
- }
-/* 89/6/13 add, (begin) */
- else if (np->PHYType == MarvellPHY) {
- unsigned int data;
-
- data = mdio_read(dev, np->phys[0], SpecificReg);
- if (data & Full_Duplex)
- np->duplexmode = 2; /* full duplex mode */
- else
- np->duplexmode = 1; /* half duplex mode */
- data &= SpeedMask;
- if (data == Speed_1000M)
- np->line_speed = 3; /* 1000M */
- else if (data == Speed_100M)
- np->line_speed = 2; /* 100M */
- else
- np->line_speed = 1; /* 10M */
- }
-/* 89/6/13 add, (end) */
-/* 89/7/27 add, (begin) */
- else if (np->PHYType == Myson981) {
- unsigned int data;
-
- data = mdio_read(dev, np->phys[0], StatusRegister);
-
- if (data & SPEED100)
- np->line_speed = 2;
- else
- np->line_speed = 1;
-
- if (data & FULLMODE)
- np->duplexmode = 2;
- else
- np->duplexmode = 1;
- }
-/* 89/7/27 add, (end) */
-/* 89/12/29 add */
- else if (np->PHYType == LevelOnePHY) {
- unsigned int data;
-
- data = mdio_read(dev, np->phys[0], SpecificReg);
- if (data & LXT1000_Full)
- np->duplexmode = 2; /* full duplex mode */
- else
- np->duplexmode = 1; /* half duplex mode */
- data &= SpeedMask;
- if (data == LXT1000_1000M)
- np->line_speed = 3; /* 1000M */
- else if (data == LXT1000_100M)
- np->line_speed = 2; /* 100M */
- else
- np->line_speed = 1; /* 10M */
- }
- np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
- if (np->line_speed == 1)
- np->crvalue |= CR_W_PS10;
- else if (np->line_speed == 3)
- np->crvalue |= CR_W_PS1000;
- if (np->duplexmode == 2)
- np->crvalue |= CR_W_FD;
- }
-}
-
-
-/* Take lock before calling this */
-static void allocate_rx_buffers(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
-
- /* allocate skb for rx buffers */
- while (np->really_rx_count != RX_RING_SIZE) {
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, np->rx_buf_sz);
- if (skb == NULL)
- break; /* Better luck next round. */
-
- while (np->lack_rxbuf->skbuff)
- np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
-
- np->lack_rxbuf->skbuff = skb;
- np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
- skb->data,
- np->rx_buf_sz,
- DMA_FROM_DEVICE);
- np->lack_rxbuf->status = RXOWN;
- ++np->really_rx_count;
- }
-}
-
-
-static void netdev_timer(struct timer_list *t)
-{
- struct netdev_private *np = from_timer(np, t, timer);
- struct net_device *dev = np->mii.dev;
- void __iomem *ioaddr = np->mem;
- int old_crvalue = np->crvalue;
- unsigned int old_linkok = np->linkok;
- unsigned long flags;
-
- if (debug)
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
- "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
- ioread32(ioaddr + TCRRCR));
-
- spin_lock_irqsave(&np->lock, flags);
-
- if (np->flags == HAS_MII_XCVR) {
- getlinkstatus(dev);
- if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
- getlinktype(dev);
- if (np->crvalue != old_crvalue) {
- stop_nic_rxtx(ioaddr, np->crvalue);
- iowrite32(np->crvalue, ioaddr + TCRRCR);
- }
- }
- }
-
- allocate_rx_buffers(dev);
-
- spin_unlock_irqrestore(&np->lock, flags);
-
- np->timer.expires = RUN_AT(10 * HZ);
- add_timer(&np->timer);
-}
-
-
-/* Take lock before calling */
-/* Reset chip and disable rx, tx and interrupts */
-static void reset_and_disable_rxtx(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
- int delay=51;
-
- /* Reset the chip's Tx and Rx processes. */
- stop_nic_rxtx(ioaddr, 0);
-
- /* Disable interrupts by clearing the interrupt mask. */
- iowrite32(0, ioaddr + IMR);
-
- /* Reset the chip to erase previous misconfiguration. */
- iowrite32(0x00000001, ioaddr + BCR);
-
- /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
- We surely wait too long (address+data phase). Who cares? */
- while (--delay) {
- ioread32(ioaddr + BCR);
- rmb();
- }
-}
-
-
-/* Take lock before calling */
-/* Restore chip after reset */
-static void enable_rxtx(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
-
- reset_rx_descriptors(dev);
-
- iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
- ioaddr + TXLBA);
- iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
- ioaddr + RXLBA);
-
- iowrite32(np->bcrvalue, ioaddr + BCR);
-
- iowrite32(0, ioaddr + RXPDR);
- __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
-
- /* Clear and Enable interrupts by setting the interrupt mask. */
- iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
- iowrite32(np->imrvalue, ioaddr + IMR);
-
- iowrite32(0, ioaddr + TXPDR);
-}
-
-
-static void reset_timer(struct timer_list *t)
-{
- struct netdev_private *np = from_timer(np, t, reset_timer);
- struct net_device *dev = np->mii.dev;
- unsigned long flags;
-
- printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
-
- spin_lock_irqsave(&np->lock, flags);
- np->crvalue = np->crvalue_sv;
- np->imrvalue = np->imrvalue_sv;
-
- reset_and_disable_rxtx(dev);
- /* works for me without this:
- reset_tx_descriptors(dev); */
- enable_rxtx(dev);
- netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
-
- np->reset_timer_armed = 0;
-
- spin_unlock_irqrestore(&np->lock, flags);
-}
-
-
-static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
- unsigned long flags;
- int i;
-
- printk(KERN_WARNING
- "%s: Transmit timed out, status %8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + ISR));
-
- {
- printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
- for (i = 0; i < RX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x",
- (unsigned int) np->rx_ring[i].status);
- printk(KERN_CONT "\n");
- printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
- for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
- printk(KERN_CONT "\n");
- }
-
- spin_lock_irqsave(&np->lock, flags);
-
- reset_and_disable_rxtx(dev);
- reset_tx_descriptors(dev);
- enable_rxtx(dev);
-
- spin_unlock_irqrestore(&np->lock, flags);
-
- netif_trans_update(dev); /* prevent tx timeout */
- dev->stats.tx_errors++;
- netif_wake_queue(dev); /* or .._start_.. ?? */
-}
-
-
-/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void init_ring(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- int i;
-
- /* initialize rx variables */
- np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
- np->cur_rx = &np->rx_ring[0];
- np->lack_rxbuf = np->rx_ring;
- np->really_rx_count = 0;
-
- /* initial rx descriptors. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].status = 0;
- np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
- np->rx_ring[i].next_desc = np->rx_ring_dma +
- (i + 1)*sizeof(struct fealnx_desc);
- np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
- np->rx_ring[i].skbuff = NULL;
- }
-
- /* for the last rx descriptor */
- np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
- np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
-
- /* allocate skb for rx buffers */
- for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
-
- if (skb == NULL) {
- np->lack_rxbuf = &np->rx_ring[i];
- break;
- }
-
- ++np->really_rx_count;
- np->rx_ring[i].skbuff = skb;
- np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
- skb->data,
- np->rx_buf_sz,
- DMA_FROM_DEVICE);
- np->rx_ring[i].status = RXOWN;
- np->rx_ring[i].control |= RXIC;
- }
-
- /* initialize tx variables */
- np->cur_tx = &np->tx_ring[0];
- np->cur_tx_copy = &np->tx_ring[0];
- np->really_tx_count = 0;
- np->free_tx_count = TX_RING_SIZE;
-
- for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_ring[i].status = 0;
- /* do we need np->tx_ring[i].control = XXX; ?? */
- np->tx_ring[i].next_desc = np->tx_ring_dma +
- (i + 1)*sizeof(struct fealnx_desc);
- np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
- np->tx_ring[i].skbuff = NULL;
- }
-
- /* for the last tx descriptor */
- np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
- np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
-}
-
-
-static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&np->lock, flags);
-
- np->cur_tx_copy->skbuff = skb;
-
-#define one_buffer
-#define BPT 1022
-#if defined(one_buffer)
- np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
- np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
- np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
-// 89/12/29 add,
- if (np->pci_dev->device == 0x891)
- np->cur_tx_copy->control |= ETIControl | RetryTxLC;
- np->cur_tx_copy->status = TXOWN;
- np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
- --np->free_tx_count;
-#elif defined(two_buffer)
- if (skb->len > BPT) {
- struct fealnx_desc *next;
-
- /* for the first descriptor */
- np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
- skb->data, BPT,
- DMA_TO_DEVICE);
- np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
- np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
- np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
-
- /* for the last descriptor */
- next = np->cur_tx_copy->next_desc_logical;
- next->skbuff = skb;
- next->control = TXIC | TXLD | CRCEnable | PADEnable;
- next->control |= (skb->len << PKTSShift); /* pkt size */
- next->control |= ((skb->len - BPT) << TBSShift); /* buf size */
-// 89/12/29 add,
- if (np->pci_dev->device == 0x891)
- np->cur_tx_copy->control |= ETIControl | RetryTxLC;
- next->buffer = dma_map_single(&np->pci_dev->dev,
- skb->data + BPT, skb->len - BPT,
- DMA_TO_DEVICE);
-
- next->status = TXOWN;
- np->cur_tx_copy->status = TXOWN;
-
- np->cur_tx_copy = next->next_desc_logical;
- np->free_tx_count -= 2;
- } else {
- np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
- skb->data, skb->len,
- DMA_TO_DEVICE);
- np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
- np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
- np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
-// 89/12/29 add,
- if (np->pci_dev->device == 0x891)
- np->cur_tx_copy->control |= ETIControl | RetryTxLC;
- np->cur_tx_copy->status = TXOWN;
- np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
- --np->free_tx_count;
- }
-#endif
-
- if (np->free_tx_count < 2)
- netif_stop_queue(dev);
- ++np->really_tx_count;
- iowrite32(0, np->mem + TXPDR);
-
- spin_unlock_irqrestore(&np->lock, flags);
- return NETDEV_TX_OK;
-}
-
-
-/* Take lock before calling */
-/* Chip probably hosed tx ring. Clean up. */
-static void reset_tx_descriptors(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- struct fealnx_desc *cur;
- int i;
-
- /* initialize tx variables */
- np->cur_tx = &np->tx_ring[0];
- np->cur_tx_copy = &np->tx_ring[0];
- np->really_tx_count = 0;
- np->free_tx_count = TX_RING_SIZE;
-
- for (i = 0; i < TX_RING_SIZE; i++) {
- cur = &np->tx_ring[i];
- if (cur->skbuff) {
- dma_unmap_single(&np->pci_dev->dev, cur->buffer,
- cur->skbuff->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(cur->skbuff);
- cur->skbuff = NULL;
- }
- cur->status = 0;
- cur->control = 0; /* needed? */
- /* probably not needed. We do it for purely paranoid reasons */
- cur->next_desc = np->tx_ring_dma +
- (i + 1)*sizeof(struct fealnx_desc);
- cur->next_desc_logical = &np->tx_ring[i + 1];
- }
- /* for the last tx descriptor */
- np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
- np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
-}
-
-
-/* Take lock and stop rx before calling this */
-static void reset_rx_descriptors(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- struct fealnx_desc *cur = np->cur_rx;
- int i;
-
- allocate_rx_buffers(dev);
-
- for (i = 0; i < RX_RING_SIZE; i++) {
- if (cur->skbuff)
- cur->status = RXOWN;
- cur = cur->next_desc_logical;
- }
-
- iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
- np->mem + RXLBA);
-}
-
-
-/* The interrupt handler does all of the Rx thread work and cleans up
- after the Tx thread. */
-static irqreturn_t intr_handler(int irq, void *dev_instance)
-{
- struct net_device *dev = (struct net_device *) dev_instance;
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
- long boguscnt = max_interrupt_work;
- unsigned int num_tx = 0;
- int handled = 0;
-
- spin_lock(&np->lock);
-
- iowrite32(0, ioaddr + IMR);
-
- do {
- u32 intr_status = ioread32(ioaddr + ISR);
-
- /* Acknowledge all of the current interrupt sources ASAP. */
- iowrite32(intr_status, ioaddr + ISR);
-
- if (debug)
- printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
- intr_status);
-
- if (!(intr_status & np->imrvalue))
- break;
-
- handled = 1;
-
-// 90/1/16 delete,
-//
-// if (intr_status & FBE)
-// { /* fatal error */
-// stop_nic_tx(ioaddr, 0);
-// stop_nic_rx(ioaddr, 0);
-// break;
-// };
-
- if (intr_status & TUNF)
- iowrite32(0, ioaddr + TXPDR);
-
- if (intr_status & CNTOVF) {
- /* missed pkts */
- dev->stats.rx_missed_errors +=
- ioread32(ioaddr + TALLY) & 0x7fff;
-
- /* crc error */
- dev->stats.rx_crc_errors +=
- (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
- }
-
- if (intr_status & (RI | RBU)) {
- if (intr_status & RI)
- netdev_rx(dev);
- else {
- stop_nic_rx(ioaddr, np->crvalue);
- reset_rx_descriptors(dev);
- iowrite32(np->crvalue, ioaddr + TCRRCR);
- }
- }
-
- while (np->really_tx_count) {
- long tx_status = np->cur_tx->status;
- long tx_control = np->cur_tx->control;
-
- if (!(tx_control & TXLD)) { /* this pkt is split across two tx descriptors */
- struct fealnx_desc *next;
-
- next = np->cur_tx->next_desc_logical;
- tx_status = next->status;
- tx_control = next->control;
- }
-
- if (tx_status & TXOWN)
- break;
-
- if (!(np->crvalue & CR_W_ENH)) {
- if (tx_status & (CSL | LC | EC | UDF | HF)) {
- dev->stats.tx_errors++;
- if (tx_status & EC)
- dev->stats.tx_aborted_errors++;
- if (tx_status & CSL)
- dev->stats.tx_carrier_errors++;
- if (tx_status & LC)
- dev->stats.tx_window_errors++;
- if (tx_status & UDF)
- dev->stats.tx_fifo_errors++;
- if ((tx_status & HF) && np->mii.full_duplex == 0)
- dev->stats.tx_heartbeat_errors++;
-
- } else {
- dev->stats.tx_bytes +=
- ((tx_control & PKTSMask) >> PKTSShift);
-
- dev->stats.collisions +=
- ((tx_status & NCRMask) >> NCRShift);
- dev->stats.tx_packets++;
- }
- } else {
- dev->stats.tx_bytes +=
- ((tx_control & PKTSMask) >> PKTSShift);
- dev->stats.tx_packets++;
- }
-
- /* Free the original skb. */
- dma_unmap_single(&np->pci_dev->dev,
- np->cur_tx->buffer,
- np->cur_tx->skbuff->len,
- DMA_TO_DEVICE);
- dev_consume_skb_irq(np->cur_tx->skbuff);
- np->cur_tx->skbuff = NULL;
- --np->really_tx_count;
- if (np->cur_tx->control & TXLD) {
- np->cur_tx = np->cur_tx->next_desc_logical;
- ++np->free_tx_count;
- } else {
- np->cur_tx = np->cur_tx->next_desc_logical;
- np->cur_tx = np->cur_tx->next_desc_logical;
- np->free_tx_count += 2;
- }
- num_tx++;
- } /* end of for loop */
-
- if (num_tx && np->free_tx_count >= 2)
- netif_wake_queue(dev);
-
- /* read transmit status for enhanced mode only */
- if (np->crvalue & CR_W_ENH) {
- long data;
-
- data = ioread32(ioaddr + TSR);
- dev->stats.tx_errors += (data & 0xff000000) >> 24;
- dev->stats.tx_aborted_errors +=
- (data & 0xff000000) >> 24;
- dev->stats.tx_window_errors +=
- (data & 0x00ff0000) >> 16;
- dev->stats.collisions += (data & 0x0000ffff);
- }
-
- if (--boguscnt < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n", dev->name, intr_status);
- if (!np->reset_timer_armed) {
- np->reset_timer_armed = 1;
- np->reset_timer.expires = RUN_AT(HZ/2);
- add_timer(&np->reset_timer);
- stop_nic_rxtx(ioaddr, 0);
- netif_stop_queue(dev);
- /* or netif_tx_disable(dev); ?? */
- /* Prevent other paths from enabling tx,rx,intrs */
- np->crvalue_sv = np->crvalue;
- np->imrvalue_sv = np->imrvalue;
- np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
- np->imrvalue = 0;
- }
-
- break;
- }
- } while (1);
-
- /* read the tally counters */
- /* missed pkts */
- dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
-
- /* crc error */
- dev->stats.rx_crc_errors +=
- (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
-
- if (debug)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread32(ioaddr + ISR));
-
- iowrite32(np->imrvalue, ioaddr + IMR);
-
- spin_unlock(&np->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-
-/* This routine is logically part of the interrupt handler, but separated
- for clarity and better register allocation. */
-static int netdev_rx(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
-
- /* If EOP is set on the next entry, it's a new packet. Send it up. */
- while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
- s32 rx_status = np->cur_rx->status;
-
- if (np->really_rx_count == 0)
- break;
-
- if (debug)
- printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);
-
- if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
- (rx_status & ErrorSummary)) {
- if (rx_status & ErrorSummary) { /* there was a fatal error */
- if (debug)
- printk(KERN_DEBUG
- "%s: Receive error, Rx status %8.8x.\n",
- dev->name, rx_status);
-
- dev->stats.rx_errors++; /* end of a packet. */
- if (rx_status & (LONGPKT | RUNTPKT))
- dev->stats.rx_length_errors++;
- if (rx_status & RXER)
- dev->stats.rx_frame_errors++;
- if (rx_status & CRC)
- dev->stats.rx_crc_errors++;
- } else {
- int need_to_reset = 0;
- int desno = 0;
-
- if (rx_status & RXFSD) { /* this pkt is too long, spanning more than one rx buffer */
- struct fealnx_desc *cur;
-
- /* check whether this packet was received completely */
- cur = np->cur_rx;
- while (desno <= np->really_rx_count) {
- ++desno;
- if ((!(cur->status & RXOWN)) &&
- (cur->status & RXLSD))
- break;
- /* goto next rx descriptor */
- cur = cur->next_desc_logical;
- }
- if (desno > np->really_rx_count)
- need_to_reset = 1;
- } else /* RXLSD not found, something is wrong */
- need_to_reset = 1;
-
- if (need_to_reset == 0) {
- int i;
-
- dev->stats.rx_length_errors++;
-
- /* free all rx descriptors related to this long pkt */
- for (i = 0; i < desno; ++i) {
- if (!np->cur_rx->skbuff) {
- printk(KERN_DEBUG
- "%s: I'm scared\n", dev->name);
- break;
- }
- np->cur_rx->status = RXOWN;
- np->cur_rx = np->cur_rx->next_desc_logical;
- }
- continue;
- } else { /* rx error, need to reset this chip */
- stop_nic_rx(ioaddr, np->crvalue);
- reset_rx_descriptors(dev);
- iowrite32(np->crvalue, ioaddr + TCRRCR);
- }
- break; /* exit the while loop */
- }
- } else { /* this received pkt is ok */
-
- struct sk_buff *skb;
- /* Omit the four octet CRC from the length. */
- short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
-
-#ifndef final_version
- if (debug)
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
- " status %x.\n", pkt_len, rx_status);
-#endif
-
- /* Check if the packet is long enough to accept without copying
- to a minimally-sized skbuff. */
- if (pkt_len < rx_copybreak &&
- (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
- skb_reserve(skb, 2); /* 16 byte align the IP header */
- dma_sync_single_for_cpu(&np->pci_dev->dev,
- np->cur_rx->buffer,
- np->rx_buf_sz,
- DMA_FROM_DEVICE);
- /* Call copy + cksum if available. */
-
-#if ! defined(__alpha__)
- skb_copy_to_linear_data(skb,
- np->cur_rx->skbuff->data, pkt_len);
- skb_put(skb, pkt_len);
-#else
- skb_put_data(skb, np->cur_rx->skbuff->data,
- pkt_len);
-#endif
- dma_sync_single_for_device(&np->pci_dev->dev,
- np->cur_rx->buffer,
- np->rx_buf_sz,
- DMA_FROM_DEVICE);
- } else {
- dma_unmap_single(&np->pci_dev->dev,
- np->cur_rx->buffer,
- np->rx_buf_sz,
- DMA_FROM_DEVICE);
- skb_put(skb = np->cur_rx->skbuff, pkt_len);
- np->cur_rx->skbuff = NULL;
- --np->really_rx_count;
- }
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-
- np->cur_rx = np->cur_rx->next_desc_logical;
- } /* end of while loop */
-
- /* allocate skb for rx buffers */
- allocate_rx_buffers(dev);
-
- return 0;
-}
-
-
-static struct net_device_stats *get_stats(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
-
- /* The chip only needs to report silently dropped frames. */
- if (netif_running(dev)) {
- dev->stats.rx_missed_errors +=
- ioread32(ioaddr + TALLY) & 0x7fff;
- dev->stats.rx_crc_errors +=
- (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
- }
-
- return &dev->stats;
-}
-
-
-/* for dev->set_multicast_list */
-static void set_rx_mode(struct net_device *dev)
-{
- spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
- unsigned long flags;
- spin_lock_irqsave(lp, flags);
- __set_rx_mode(dev);
- spin_unlock_irqrestore(lp, flags);
-}
-
-
-/* Take lock before calling */
-static void __set_rx_mode(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
- u32 mc_filter[2]; /* Multicast hash filter */
- u32 rx_mode;
-
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to match, or accept all multicasts. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = CR_W_AB | CR_W_AM;
- } else {
- struct netdev_hw_addr *ha;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- unsigned int bit;
- bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
- mc_filter[bit >> 5] |= (1 << bit);
- }
- rx_mode = CR_W_AB | CR_W_AM;
- }
-
- stop_nic_rxtx(ioaddr, np->crvalue);
-
- iowrite32(mc_filter[0], ioaddr + MAR0);
- iowrite32(mc_filter[1], ioaddr + MAR1);
- np->crvalue &= ~CR_W_RXMODEMASK;
- np->crvalue |= rx_mode;
- iowrite32(np->crvalue, ioaddr + TCRRCR);
-}
-
-static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct netdev_private *np = netdev_priv(dev);
-
- strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
-}
-
-static int netdev_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct netdev_private *np = netdev_priv(dev);
-
- spin_lock_irq(&np->lock);
- mii_ethtool_get_link_ksettings(&np->mii, cmd);
- spin_unlock_irq(&np->lock);
-
- return 0;
-}
-
-static int netdev_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct netdev_private *np = netdev_priv(dev);
- int rc;
-
- spin_lock_irq(&np->lock);
- rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
- spin_unlock_irq(&np->lock);
-
- return rc;
-}
-
-static int netdev_nway_reset(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- return mii_nway_restart(&np->mii);
-}
-
-static u32 netdev_get_link(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- return mii_link_ok(&np->mii);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 value)
-{
- debug = value;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .nway_reset = netdev_nway_reset,
- .get_link = netdev_get_link,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
- .get_link_ksettings = netdev_get_link_ksettings,
- .set_link_ksettings = netdev_set_link_ksettings,
-};
-
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct netdev_private *np = netdev_priv(dev);
- int rc;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- spin_lock_irq(&np->lock);
- rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
- spin_unlock_irq(&np->lock);
-
- return rc;
-}
-
-
-static int netdev_close(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mem;
- int i;
-
- netif_stop_queue(dev);
-
- /* Disable interrupts by clearing the interrupt mask. */
- iowrite32(0x0000, ioaddr + IMR);
-
- /* Stop the chip's Tx and Rx processes. */
- stop_nic_rxtx(ioaddr, 0);
-
- del_timer_sync(&np->timer);
- del_timer_sync(&np->reset_timer);
-
- free_irq(np->pci_dev->irq, dev);
-
- /* Free all the skbuffs in the Rx queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = np->rx_ring[i].skbuff;
-
- np->rx_ring[i].status = 0;
- if (skb) {
- dma_unmap_single(&np->pci_dev->dev,
- np->rx_ring[i].buffer, np->rx_buf_sz,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- np->rx_ring[i].skbuff = NULL;
- }
- }
-
- for (i = 0; i < TX_RING_SIZE; i++) {
- struct sk_buff *skb = np->tx_ring[i].skbuff;
-
- if (skb) {
- dma_unmap_single(&np->pci_dev->dev,
- np->tx_ring[i].buffer, skb->len,
- DMA_TO_DEVICE);
- dev_kfree_skb(skb);
- np->tx_ring[i].skbuff = NULL;
- }
- }
-
- return 0;
-}
-
-static const struct pci_device_id fealnx_pci_tbl[] = {
- {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
- {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
- {} /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
-
-
-static struct pci_driver fealnx_driver = {
- .name = "fealnx",
- .id_table = fealnx_pci_tbl,
- .probe = fealnx_init_one,
- .remove = fealnx_remove_one,
-};
-
-module_pci_driver(fealnx_driver);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ce866ae3df03..f1e80d6996ef 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -29,6 +29,7 @@ config FEC
select CRC32
select PHYLIB
select PAGE_POOL
+ select PAGE_POOL_STATS
imply NET_SELFTESTS
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index 0e1439fd00bd..2b560661c82a 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -2,8 +2,8 @@
menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
depends on FSL_DPAA && FSL_FMAN
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
+ select PCS_LYNX
help
Data Path Acceleration Architecture Ethernet driver,
supporting the Freescale QorIQ chips.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index fc68a32ce2f7..3f8032947d86 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -264,8 +264,19 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->needed_headroom = priv->tx_headroom;
net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
- mac_dev->net_dev = net_dev;
+ /* The rest of the config is filled in by the mac device already */
+ mac_dev->phylink_config.dev = &net_dev->dev;
+ mac_dev->phylink_config.type = PHYLINK_NETDEV;
mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+ mac_dev->phylink = phylink_create(&mac_dev->phylink_config,
+ dev_fwnode(mac_dev->dev),
+ mac_dev->phy_if,
+ mac_dev->phylink_ops);
+ if (IS_ERR(mac_dev->phylink)) {
+ err = PTR_ERR(mac_dev->phylink);
+ dev_err_probe(dev, err, "Could not create phylink\n");
+ return err;
+ }
/* start without the RUNNING flag, phylib controls it later */
netif_carrier_off(net_dev);
@@ -273,6 +284,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() = %d\n", err);
+ phylink_destroy(mac_dev->phylink);
return err;
}
@@ -294,8 +306,7 @@ static int dpaa_stop(struct net_device *net_dev)
*/
msleep(200);
- if (mac_dev->phy_dev)
- phy_stop(mac_dev->phy_dev);
+ phylink_stop(mac_dev->phylink);
mac_dev->disable(mac_dev->fman_mac);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -304,8 +315,7 @@ static int dpaa_stop(struct net_device *net_dev)
err = error;
}
- if (net_dev->phydev)
- phy_disconnect(net_dev->phydev);
+ phylink_disconnect_phy(mac_dev->phylink);
net_dev->phydev = NULL;
msleep(200);
@@ -833,10 +843,10 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
/* Set different thresholds based on the configured MAC speed.
* This may turn suboptimal if the MAC is reconfigured at another
- * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up
* callback.
*/
- if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD)
cs_th = DPAA_CS_THRESHOLD_10G;
else
cs_th = DPAA_CS_THRESHOLD_1G;
@@ -865,7 +875,7 @@ out_error:
static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
{
- struct net_device *net_dev = mac_dev->net_dev;
+ struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev);
struct dpaa_priv *priv = netdev_priv(net_dev);
struct qm_mcc_initcgr opts = { };
u32 cs_th;
@@ -2904,58 +2914,6 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
}
}
-static void dpaa_adjust_link(struct net_device *net_dev)
-{
- struct mac_device *mac_dev;
- struct dpaa_priv *priv;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
- mac_dev->adjust_link(mac_dev);
-}
-
-/* The Aquantia PHYs are capable of performing rate adaptation */
-#define PHY_VEND_AQUANTIA 0x03a1b400
-#define PHY_VEND_AQUANTIA2 0x31c31c00
-
-static int dpaa_phy_init(struct net_device *net_dev)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct mac_device *mac_dev;
- struct phy_device *phy_dev;
- struct dpaa_priv *priv;
- u32 phy_vendor;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
- &dpaa_adjust_link, 0,
- mac_dev->phy_if);
- if (!phy_dev) {
- netif_err(priv, ifup, net_dev, "init_phy() failed\n");
- return -ENODEV;
- }
-
- phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
- /* Unless the PHY is capable of rate adaptation */
- if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- (phy_vendor != PHY_VEND_AQUANTIA &&
- phy_vendor != PHY_VEND_AQUANTIA2)) {
- /* remove any features not supported by the controller */
- ethtool_convert_legacy_u32_to_link_mode(mask,
- mac_dev->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, mask);
- }
-
- phy_support_asym_pause(phy_dev);
-
- mac_dev->phy_dev = phy_dev;
- net_dev->phydev = phy_dev;
-
- return 0;
-}
-
static int dpaa_open(struct net_device *net_dev)
{
struct mac_device *mac_dev;
@@ -2966,7 +2924,8 @@ static int dpaa_open(struct net_device *net_dev)
mac_dev = priv->mac_dev;
dpaa_eth_napi_enable(priv);
- err = dpaa_phy_init(net_dev);
+ err = phylink_of_phy_connect(mac_dev->phylink,
+ mac_dev->dev->of_node, 0);
if (err)
goto phy_init_failed;
@@ -2981,7 +2940,7 @@ static int dpaa_open(struct net_device *net_dev)
netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
goto mac_start_failed;
}
- phy_start(priv->mac_dev->phy_dev);
+ phylink_start(mac_dev->phylink);
netif_tx_start_all_queues(net_dev);
@@ -2990,6 +2949,7 @@ static int dpaa_open(struct net_device *net_dev)
mac_start_failed:
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
fman_port_disable(mac_dev->port[i]);
+ phylink_disconnect_phy(mac_dev->phylink);
phy_init_failed:
dpaa_eth_napi_disable(priv);
@@ -3145,10 +3105,12 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
int ret = -EINVAL;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
if (cmd == SIOCGMIIREG) {
if (net_dev->phydev)
- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
+ cmd);
}
if (cmd == SIOCSHWTSTAMP)
@@ -3551,6 +3513,7 @@ static int dpaa_remove(struct platform_device *pdev)
dev_set_drvdata(dev, NULL);
unregister_netdev(net_dev);
+ phylink_destroy(priv->mac_dev->phylink);
err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 769e936a263c..9c71cbbb13d8 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -54,27 +54,19 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- if (!net_dev->phydev)
- return 0;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- phy_ethtool_ksettings_get(net_dev->phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd);
}
static int dpaa_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *cmd)
{
- int err;
-
- if (!net_dev->phydev)
- return -ENODEV;
-
- err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
- if (err < 0)
- netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- return err;
+ return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd);
}
static void dpaa_get_drvinfo(struct net_device *net_dev,
@@ -99,80 +91,28 @@ static void dpaa_set_msglevel(struct net_device *net_dev,
static int dpaa_nway_reset(struct net_device *net_dev)
{
- int err;
-
- if (!net_dev->phydev)
- return -ENODEV;
-
- err = 0;
- if (net_dev->phydev->autoneg) {
- err = phy_start_aneg(net_dev->phydev);
- if (err < 0)
- netdev_err(net_dev, "phy_start_aneg() = %d\n",
- err);
- }
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- return err;
+ return phylink_ethtool_nway_reset(mac_dev->phylink);
}
static void dpaa_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *epause)
{
- struct mac_device *mac_dev;
- struct dpaa_priv *priv;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- if (!net_dev->phydev)
- return;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- epause->autoneg = mac_dev->autoneg_pause;
- epause->rx_pause = mac_dev->rx_pause_active;
- epause->tx_pause = mac_dev->tx_pause_active;
+ phylink_ethtool_get_pauseparam(mac_dev->phylink, epause);
}
static int dpaa_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *epause)
{
- struct mac_device *mac_dev;
- struct phy_device *phydev;
- bool rx_pause, tx_pause;
- struct dpaa_priv *priv;
- int err;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- phydev = net_dev->phydev;
- if (!phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
- return -ENODEV;
- }
-
- if (!phy_validate_pause(phydev, epause))
- return -EINVAL;
-
- /* The MAC should know how to handle PAUSE frame autonegotiation before
- * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
- * settings.
- */
- mac_dev->autoneg_pause = !!epause->autoneg;
- mac_dev->rx_pause_req = !!epause->rx_pause;
- mac_dev->tx_pause_req = !!epause->tx_pause;
-
- /* Determine the sym/asym advertised PAUSE capabilities from the desired
- * rx/tx pause settings.
- */
-
- phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- return err;
+ return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause);
}
static int dpaa_get_sset_count(struct net_device *net_dev, int type)
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 3d9842af7f10..1b05ba8d1cbf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index 8356af4631fd..1af254caeb0d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
int i;
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
- "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ seq_printf(file, "%s %5s%16s%16s%16s%16s%16s%16s\n",
+ "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs",
"Avg Frm/CDAN", "Buf count");
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
- ch->ch_id,
+ seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n",
+ "CH#", i, ch->ch_id,
ch->nctx.desired_cpu,
ch->stats.dequeue_portal_busy,
ch->stats.frames,
@@ -119,6 +119,51 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
+static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ int i, j, num_queues, buf_cnt;
+ struct dpaa2_eth_bp *bp;
+ char ch_name[10];
+ int err;
+
+ /* Print out the header */
+ seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s %10s%15s", "IDX", "BPID", "Buf count");
+ num_queues = dpaa2_eth_queue_count(priv);
+ for (i = 0; i < num_queues; i++) {
+ snprintf(ch_name, sizeof(ch_name), "CH#%d", i);
+ seq_printf(file, "%10s", ch_name);
+ }
+ seq_printf(file, "\n");
+
+ /* For each buffer pool, print out its BPID, the number of buffers in
+ * that buffer pool and the channels which are using it.
+ */
+ for (i = 0; i < priv->num_bps; i++) {
+ bp = priv->bp[i];
+
+ err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(priv->net_dev, "Buffer count query error %d\n", err);
+ return err;
+ }
+
+ seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt);
+ for (j = 0; j < num_queues; j++) {
+ if (priv->channel[j]->bp == bp)
+ seq_printf(file, "%10s", "x");
+ else
+ seq_printf(file, "%10s", "");
+ }
+ seq_printf(file, "\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp);
+
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpni_dev;
@@ -139,6 +184,10 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
/* per-fq stats file */
debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
+
+ /* per buffer pool stats file */
+ debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops);
+
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 7fefe1574b6a..76f808d38066 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -37,18 +37,9 @@ static int dpaa2_eth_dl_info_get(struct devlink *devlink,
struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
char buf[10];
- int err;
-
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
- err = devlink_info_version_running_put(req, "dpni", buf);
- if (err)
- return err;
-
- return 0;
+ return devlink_info_version_running_put(req, "dpni", buf);
}
static struct dpaa2_eth_trap_item *
@@ -226,25 +217,16 @@ int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
{
struct devlink_port *devlink_port = &priv->devlink_port;
struct devlink_port_attrs attrs = {};
- int err;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(devlink_port, &attrs);
-
- err = devlink_port_register(priv->devlink, devlink_port, 0);
- if (err)
- return err;
-
- devlink_port_type_eth_set(devlink_port, priv->net_dev);
-
- return 0;
+ return devlink_port_register(priv->devlink, devlink_port, 0);
}
void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
{
struct devlink_port *devlink_port = &priv->devlink_port;
- devlink_port_type_clear(devlink_port);
devlink_port_unregister(devlink_port);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
index 5fb5f14e01ec..9b43fadb9b11 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -73,6 +73,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
TP_ARGS(netdev, fd)
);
+/* Tx (egress) XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
/* Rx fd */
DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
TP_PROTO(struct net_device *netdev,
@@ -81,6 +89,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
TP_ARGS(netdev, fd)
);
+/* Rx XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
/* Tx confirmation fd */
DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
TP_PROTO(struct net_device *netdev,
@@ -90,57 +106,81 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
);
/* Log data about raw buffers. Useful for tracing DPBP content. */
-TRACE_EVENT(dpaa2_eth_buf_seed,
- /* Trace function prototype */
- TP_PROTO(struct net_device *netdev,
- /* virtual address and size */
- void *vaddr,
- size_t size,
- /* dma map address and size */
- dma_addr_t dma_addr,
- size_t map_size,
- /* buffer pool id, if relevant */
- u16 bpid),
-
- /* Repeat argument list here */
- TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
-
- /* A structure containing the relevant information we want
- * to record. Declare name and type for each normal element,
- * name, type and size for arrays. Use __string for variable
- * length strings.
- */
- TP_STRUCT__entry(
- __field(void *, vaddr)
- __field(size_t, size)
- __field(dma_addr_t, dma_addr)
- __field(size_t, map_size)
- __field(u16, bpid)
- __string(name, netdev->name)
- ),
-
- /* The function that assigns values to the above declared
- * fields
- */
- TP_fast_assign(
- __entry->vaddr = vaddr;
- __entry->size = size;
- __entry->dma_addr = dma_addr;
- __entry->map_size = map_size;
- __entry->bpid = bpid;
- __assign_str(name, netdev->name);
- ),
-
- /* This is what gets printed when the trace event is
- * triggered.
- */
- TP_printk(TR_BUF_FMT,
- __get_str(name),
- __entry->vaddr,
- __entry->size,
- &__entry->dma_addr,
- __entry->map_size,
- __entry->bpid)
+DECLARE_EVENT_CLASS(dpaa2_eth_buf,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ /* virtual address and size */
+ void *vaddr,
+ size_t size,
+ /* dma map address and size */
+ dma_addr_t dma_addr,
+ size_t map_size,
+ /* buffer pool id, if relevant */
+ u16 bpid),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(void *, vaddr)
+ __field(size_t, size)
+ __field(dma_addr_t, dma_addr)
+ __field(size_t, map_size)
+ __field(u16, bpid)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->vaddr = vaddr;
+ __entry->size = size;
+ __entry->dma_addr = dma_addr;
+ __entry->map_size = map_size;
+ __entry->bpid = bpid;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_BUF_FMT,
+ __get_str(name),
+ __entry->vaddr,
+ __entry->size,
+ &__entry->dma_addr,
+ __entry->map_size,
+ __entry->bpid)
+);
+
+/* Main memory buff seeding */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed,
+ TP_PROTO(struct net_device *netdev,
+ void *vaddr,
+ size_t size,
+ dma_addr_t dma_addr,
+ size_t map_size,
+ u16 bpid),
+
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
+);
+
+/* UMEM buff seeding on AF_XDP fast path */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed,
+ TP_PROTO(struct net_device *netdev,
+ void *vaddr,
+ size_t size,
+ dma_addr_t dma_addr,
+ size_t map_size,
+ u16 bpid),
+
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
);
/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 8d029addddad..0c35abb7d065 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -8,7 +8,6 @@
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
-#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
@@ -19,6 +18,7 @@
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tso.h>
+#include <net/xdp_sock_drv.h>
#include "dpaa2-eth.h"
@@ -104,8 +104,8 @@ static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
}
-static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
- dma_addr_t iova_addr)
+void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
{
phys_addr_t phys_addr;
@@ -279,23 +279,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
* be released in the pool
*/
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
- int count)
+ int count, bool xsk_zc)
{
struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_swa *swa;
+ struct xdp_buff *xdp_buff;
void *vaddr;
int i;
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vaddr, 0);
+
+ if (!xsk_zc) {
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ swa = (struct dpaa2_eth_swa *)
+ (vaddr + DPAA2_ETH_RX_HWA_SIZE);
+ xdp_buff = swa->xsk.xdp_buff;
+ xsk_buff_free(xdp_buff);
+ }
}
}
-static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
@@ -304,7 +314,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
return;
- while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
ch->recycled_bufs,
ch->recycled_bufs_cnt)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -313,7 +323,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
}
if (err) {
- dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+ dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
+ ch->recycled_bufs_cnt, ch->xsk_zc);
ch->buf_count -= ch->recycled_bufs_cnt;
}
@@ -377,10 +388,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
fq->xdp_tx_fds.num = 0;
}
-static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
struct dpaa2_faead *faead;
struct dpaa2_fd *dest_fd;
@@ -485,19 +496,15 @@ out:
return xdp_act;
}
-static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- void *fd_vaddr)
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, u32 fd_length,
+ void *fd_vaddr)
{
u16 fd_offset = dpaa2_fd_get_offset(fd);
- struct dpaa2_eth_priv *priv = ch->priv;
- u32 fd_length = dpaa2_fd_get_len(fd);
struct sk_buff *skb = NULL;
unsigned int skb_len;
- if (fd_length > priv->rx_copybreak)
- return NULL;
-
skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
skb = napi_alloc_skb(&ch->napi, skb_len);
@@ -514,11 +521,66 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
return skb;
}
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ if (fd_length > priv->rx_copybreak)
+ return NULL;
+
+ return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
+}
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, void *vaddr,
+ struct dpaa2_eth_fq *fq,
+ struct rtnl_link_stats64 *percpu_stats,
+ struct sk_buff *skb)
+{
+ struct dpaa2_fas *fas;
+ u32 status = 0;
+
+ fas = dpaa2_get_fas(vaddr, false);
+ prefetch(fas);
+ prefetch(skb->data);
+
+ /* Get the timestamp value */
+ if (priv->rx_tstamp) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ __le64 *ts = dpaa2_get_ts(vaddr, false);
+ u64 ns;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ status = le32_to_cpu(fas->status);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ skb_record_rx_queue(skb, fq->flowid);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
+
+ list_add_tail(&skb->list, ch->rx_list);
+}
+
/* Main Rx frame processing routine */
-static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- struct dpaa2_eth_fq *fq)
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -527,9 +589,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_fas *fas;
void *buf_data;
- u32 status = 0;
u32 xdp_act;
/* Tracing point */
@@ -539,8 +599,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- fas = dpaa2_get_fas(vaddr, false);
- prefetch(fas);
buf_data = vaddr + dpaa2_fd_get_offset(fd);
prefetch(buf_data);
@@ -578,35 +636,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
if (unlikely(!skb))
goto err_build_skb;
- prefetch(skb->data);
-
- /* Get the timestamp value */
- if (priv->rx_tstamp) {
- struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- __le64 *ts = dpaa2_get_ts(vaddr, false);
- u64 ns;
-
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
- }
-
- /* Check if we need to validate the L4 csum */
- if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
- status = le32_to_cpu(fas->status);
- dpaa2_eth_validate_rx_csum(priv, status, skb);
- }
-
- skb->protocol = eth_type_trans(skb, priv->net_dev);
- skb_record_rx_queue(skb, fq->flowid);
-
- percpu_stats->rx_packets++;
- percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
-
- list_add_tail(&skb->list, ch->rx_list);
-
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
return;
err_build_skb:
@@ -827,7 +857,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
}
}
-static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_sgt_cache *sgt_cache;
void *sgt_buf = NULL;
@@ -849,7 +879,7 @@ static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
return sgt_buf;
}
-static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
{
struct dpaa2_eth_sgt_cache *sgt_cache;
@@ -1084,9 +1114,10 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
*/
-static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- const struct dpaa2_fd *fd, bool in_napi)
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr, sg_addr;
@@ -1153,6 +1184,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
if (!swa->tso.is_last_fd)
should_free_skb = 0;
+ } else if (swa->type == DPAA2_ETH_SWA_XSK) {
+ /* Unmap the SGT Buffer */
+ dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
+ DMA_BIDIRECTIONAL);
} else {
skb = swa->single.skb;
@@ -1170,6 +1205,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
return;
}
+ if (swa->type == DPAA2_ETH_SWA_XSK) {
+ ch->xsk_tx_pkts_sent++;
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+ return;
+ }
+
if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
fq->dq_frames++;
fq->dq_bytes += fd_len;
@@ -1344,7 +1385,7 @@ err_alloc_tso_hdr:
err_sgt_get:
/* Free all the other FDs that were already fully created */
for (i = 0; i < index; i++)
- dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+ dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
return err;
}
@@ -1460,7 +1501,7 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- dpaa2_eth_free_tx_fd(priv, fq, fd, false);
+ dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
percpu_stats->tx_packets += total_enqueued;
@@ -1553,7 +1594,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- dpaa2_eth_free_tx_fd(priv, fq, fd, true);
+ dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
if (likely(!fd_errors))
return;
@@ -1631,44 +1672,76 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
* to the specified buffer pool
*/
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch, u16 bpid)
+ struct dpaa2_eth_channel *ch)
{
+ struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ struct dpaa2_eth_swa *swa;
struct page *page;
dma_addr_t addr;
int retries = 0;
- int i, err;
-
- for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
- /* Allocate buffer visible to WRIOP + skb shared info +
- * alignment padding
- */
- /* allocate one page for each Rx buffer. WRIOP sees
- * the entire page except for a tailroom reserved for
- * skb shared info
+ int i = 0, err;
+ u32 batch;
+
+ /* Allocate buffers visible to WRIOP */
+ if (!ch->xsk_zc) {
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Also allocate skb shared info and alignment padding.
+ * There is one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page)
+ goto err_alloc;
+
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev,
+ page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ ch->bp->bpid);
+ }
+ } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
+ /* Allocate XSK buffers for AF_XDP fast path in batches
+ * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
+ * provide enough buffers at the moment
*/
- page = dev_alloc_pages(0);
- if (!page)
+ batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
+ DPAA2_ETH_BUFS_PER_CMD);
+ if (!batch)
goto err_alloc;
- addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr)))
- goto err_map;
+ for (i = 0; i < batch; i++) {
+ swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
+ DPAA2_ETH_RX_HWA_SIZE);
+ swa->xsk.xdp_buff = xdp_buffs[i];
+
+ addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
- buf_array[i] = addr;
+ buf_array[i] = addr;
- /* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
- DPAA2_ETH_RX_BUF_RAW_SIZE,
- addr, priv->rx_buf_size,
- bpid);
+ trace_dpaa2_xsk_buf_seed(priv->net_dev,
+ xdp_buffs[i]->data_hard_start,
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ ch->bp->bpid);
+ }
}
release_bufs:
/* In case the portal is busy, retry until successful */
- while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+ while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
buf_array, i)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
break;
@@ -1679,14 +1752,19 @@ release_bufs:
* not much else we can do about it
*/
if (err) {
- dpaa2_eth_free_bufs(priv, buf_array, i);
+ dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
return 0;
}
return i;
err_map:
- __free_pages(page, 0);
+ if (!ch->xsk_zc) {
+ __free_pages(page, 0);
+ } else {
+ for (; i < batch; i++)
+ xsk_buff_free(xdp_buffs[i]);
+ }
err_alloc:
/* If we managed to allocate at least some buffers,
* release them to hardware
@@ -1697,39 +1775,64 @@ err_alloc:
return 0;
}
-static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
{
- int i, j;
+ int i;
int new_count;
- for (j = 0; j < priv->num_channels; j++) {
- for (i = 0; i < DPAA2_ETH_NUM_BUFS;
- i += DPAA2_ETH_BUFS_PER_CMD) {
- new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
- priv->channel[j]->buf_count += new_count;
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = dpaa2_eth_add_bufs(priv, ch);
+ ch->buf_count += new_count;
- if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
- return -ENOMEM;
- }
- }
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD)
+ return -ENOMEM;
}
return 0;
}
+static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct dpaa2_eth_channel *channel;
+ int i, err = 0;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+
+ err = dpaa2_eth_seed_pool(priv, channel);
+
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+ * to limp on.
+ */
+ if (err)
+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+ channel->bp->dev->obj_desc.id,
+ channel->bp->bpid);
+ }
+}
+
/*
- * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * Drain the specified number of buffers from one of the DPNI's private buffer
+ * pools.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
*/
-static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
+ int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ bool xsk_zc = false;
int retries = 0;
- int ret;
+ int i, ret;
+
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->channel[i]->bp->bpid == bpid)
+ xsk_zc = priv->channel[i]->xsk_zc;
do {
- ret = dpaa2_io_service_acquire(NULL, priv->bpid,
- buf_array, count);
+ ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
if (ret < 0) {
if (ret == -EBUSY &&
retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -1737,28 +1840,40 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- dpaa2_eth_free_bufs(priv, buf_array, ret);
+ dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
retries = 0;
} while (ret);
}
-static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{
int i;
- dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
- dpaa2_eth_drain_bufs(priv, 1);
+ /* Drain the buffer pool */
+ dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, bpid, 1);
+	/* Zero the buffer count of all channels that were using
+	 * this buffer pool.
+	 */
for (i = 0; i < priv->num_channels; i++)
- priv->channel[i]->buf_count = 0;
+ if (priv->channel[i]->bp->bpid == bpid)
+ priv->channel[i]->buf_count = 0;
+}
+
+static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_bps; i++)
+ dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
}
/* Function is called from softirq context only, so we don't need to guard
* the access to percpu count
*/
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- u16 bpid)
+ struct dpaa2_eth_channel *ch)
{
int new_count;
@@ -1766,7 +1881,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
- new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
+ new_count = dpaa2_eth_add_bufs(priv, ch);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -1830,6 +1945,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_fq *fq, *txc_fq = NULL;
struct netdev_queue *nq;
int store_cleaned, work_done;
+ bool work_done_zc = false;
struct list_head rx_list;
int retries = 0;
u16 flowid;
@@ -1842,13 +1958,22 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
INIT_LIST_HEAD(&rx_list);
ch->rx_list = &rx_list;
+ if (ch->xsk_zc) {
+ work_done_zc = dpaa2_xsk_tx(priv, ch);
+ /* If we reached the XSK Tx per NAPI threshold, we're done */
+ if (work_done_zc) {
+ work_done = budget;
+ goto out;
+ }
+ }
+
do {
err = dpaa2_eth_pull_channel(ch);
if (unlikely(err))
break;
/* Refill pool if appropriate */
- dpaa2_eth_refill_pool(priv, ch, priv->bpid);
+ dpaa2_eth_refill_pool(priv, ch);
store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
if (store_cleaned <= 0)
@@ -1894,6 +2019,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
out:
netif_receive_skb_list(ch->rx_list);
+ if (ch->xsk_tx_pkts_sent) {
+ xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
+ ch->xsk_tx_pkts_sent = 0;
+ }
+
if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
@@ -2017,8 +2147,11 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
+ * We can avoid locking because we are called from the "link changed"
+ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
+ * (the writer to priv->mac), so we cannot race with it.
*/
- if (dpaa2_eth_is_type_phy(priv))
+ if (dpaa2_mac_is_type_phy(priv->mac))
goto out;
	/* Check link state; speed / duplex changes are not treated yet */
@@ -2047,15 +2180,9 @@ static int dpaa2_eth_open(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
- err = dpaa2_eth_seed_pool(priv, priv->bpid);
- if (err) {
- /* Not much to do; the buffer pool, though not filled up,
- * may still contain some buffers which would enable us
- * to limp on.
- */
- netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
- priv->dpbp_dev->obj_desc.id, priv->bpid);
- }
+ dpaa2_eth_seed_pools(priv);
+
+ mutex_lock(&priv->mac_lock);
if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
@@ -2075,20 +2202,21 @@ static int dpaa2_eth_open(struct net_device *net_dev)
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
+ mutex_unlock(&priv->mac_lock);
netdev_err(net_dev, "dpni_enable() failed\n");
goto enable_err;
}
- if (dpaa2_eth_is_type_phy(priv)) {
+ if (dpaa2_eth_is_type_phy(priv))
dpaa2_mac_start(priv->mac);
- phylink_start(priv->mac->phylink);
- }
+
+ mutex_unlock(&priv->mac_lock);
return 0;
enable_err:
dpaa2_eth_disable_ch_napi(priv);
- dpaa2_eth_drain_pool(priv);
+ dpaa2_eth_drain_pools(priv);
return err;
}
@@ -2155,14 +2283,17 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
+ mutex_lock(&priv->mac_lock);
+
if (dpaa2_eth_is_type_phy(priv)) {
- phylink_stop(priv->mac->phylink);
dpaa2_mac_stop(priv->mac);
} else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
}
+ mutex_unlock(&priv->mac_lock);
+
/* On dpni_disable(), the MC firmware will:
* - stop MAC Rx and wait for all Rx frames to be enqueued to software
* - cut off WRIOP dequeues from egress FQs and wait until transmission
@@ -2193,7 +2324,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */
- dpaa2_eth_drain_pool(priv);
+ dpaa2_eth_drain_pools(priv);
/* Empty the Scatter-Gather Buffer cache */
dpaa2_eth_sgt_cache_drain(priv);
@@ -2488,12 +2619,20 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
return -EOPNOTSUPP;
}
@@ -2602,7 +2741,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
need_update = (!!priv->xdp_prog != !!prog);
if (up)
- dpaa2_eth_stop(dev);
+ dev_close(dev);
/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
* Also, when switching between xdp/non-xdp modes we need to reconfigure
@@ -2630,7 +2769,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
}
if (up) {
- err = dpaa2_eth_open(dev);
+ err = dev_open(dev, NULL);
if (err)
return err;
}
@@ -2641,7 +2780,7 @@ out_err:
if (prog)
bpf_prog_sub(prog, priv->num_channels);
if (up)
- dpaa2_eth_open(dev);
+ dev_open(dev, NULL);
return err;
}
@@ -2651,6 +2790,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return dpaa2_eth_setup_xdp(dev, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2881,6 +3022,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+ .ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
.ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
@@ -2895,7 +3037,11 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
/* Update NAPI statistics */
ch->stats.cdan++;
- napi_schedule(&ch->napi);
+	/* NAPI can also be scheduled from the AF_XDP Tx path. If it is
+	 * already running, mark it as missed so that it gets rescheduled;
+	 * otherwise schedule it now.
+	 */
+ if (!napi_if_scheduled_mark_missed(&ch->napi))
+ napi_schedule(&ch->napi);
}
/* Allocate and configure a DPCON object */
@@ -2908,10 +3054,12 @@ static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
FSL_MC_POOL_DPCON, &dpcon);
if (err) {
- if (err == -ENXIO)
+ if (err == -ENXIO) {
+ dev_dbg(dev, "Waiting for DPCON\n");
err = -EPROBE_DEFER;
- else
+ } else {
dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ }
return ERR_PTR(err);
}
@@ -3021,7 +3169,9 @@ static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
channel = dpaa2_eth_alloc_channel(priv);
if (IS_ERR_OR_NULL(channel)) {
err = PTR_ERR_OR_ZERO(channel);
- if (err != -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER)
+ dev_dbg(dev, "waiting for affine channel\n");
+ else
dev_info(dev,
"No affine channel for cpu %d and above\n", i);
goto err_alloc_ch;
@@ -3204,13 +3354,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
dpaa2_eth_set_fq_affinity(priv);
}
-/* Allocate and configure one buffer pool for each interface */
-static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
+/* Allocate and configure a buffer pool */
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
{
- int err;
- struct fsl_mc_device *dpbp_dev;
struct device *dev = priv->net_dev->dev.parent;
+ struct fsl_mc_device *dpbp_dev;
struct dpbp_attr dpbp_attrs;
+ struct dpaa2_eth_bp *bp;
+ int err;
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
@@ -3219,12 +3370,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
err = -EPROBE_DEFER;
else
dev_err(dev, "DPBP device allocation failed\n");
- return err;
+ return ERR_PTR(err);
}
- priv->dpbp_dev = dpbp_dev;
+ bp = kzalloc(sizeof(*bp), GFP_KERNEL);
+ if (!bp) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
- err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+ err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
&dpbp_dev->mc_handle);
if (err) {
dev_err(dev, "dpbp_open() failed\n");
@@ -3249,9 +3404,11 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
dev_err(dev, "dpbp_get_attributes() failed\n");
goto err_get_attr;
}
- priv->bpid = dpbp_attrs.bpid;
- return 0;
+ bp->dev = dpbp_dev;
+ bp->bpid = dpbp_attrs.bpid;
+
+ return bp;
err_get_attr:
dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
@@ -3259,17 +3416,58 @@ err_enable:
err_reset:
dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
+ kfree(bp);
+err_alloc:
fsl_mc_object_free(dpbp_dev);
- return err;
+ return ERR_PTR(err);
}
-static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
{
- dpaa2_eth_drain_pool(priv);
- dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
- dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
- fsl_mc_object_free(priv->dpbp_dev);
+ struct dpaa2_eth_bp *bp;
+ int i;
+
+ bp = dpaa2_eth_allocate_dpbp(priv);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
+ priv->num_bps++;
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->bp = bp;
+
+ return 0;
+}
+
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
+{
+ int idx_bp;
+
+ /* Find the index at which this BP is stored */
+ for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
+ if (priv->bp[idx_bp] == bp)
+ break;
+
+ /* Drain the pool and disable the associated MC object */
+ dpaa2_eth_drain_pool(priv, bp->bpid);
+ dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
+ dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
+ fsl_mc_object_free(bp->dev);
+ kfree(bp);
+
+ /* Move the last in use DPBP over in this position */
+ priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
+ priv->num_bps--;
+}
+
+static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_bps; i++)
+ dpaa2_eth_free_dpbp(priv, priv->bp[i]);
}
static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
@@ -3610,7 +3808,7 @@ static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
priv->dpni_ver_major, priv->dpni_ver_minor,
DPNI_VER_MAJOR, DPNI_VER_MINOR);
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto close;
}
@@ -4154,15 +4352,16 @@ out:
*/
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
+ struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
struct net_device *net_dev = priv->net_dev;
+ struct dpni_pools_cfg pools_params = { 0 };
struct device *dev = net_dev->dev.parent;
- struct dpni_pools_cfg pools_params;
struct dpni_error_cfg err_cfg;
int err = 0;
int i;
pools_params.num_dpbp = 1;
- pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+ pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
@@ -4426,8 +4625,10 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
- if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
+ netdev_dbg(priv->net_dev, "waiting for mac\n");
return PTR_ERR(dpmac_dev);
+ }
if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
@@ -4443,22 +4644,29 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
err = dpaa2_mac_open(mac);
if (err)
goto err_free_mac;
- priv->mac = mac;
- if (dpaa2_eth_is_type_phy(priv)) {
+ if (dpaa2_mac_is_type_phy(mac)) {
err = dpaa2_mac_connect(mac);
- if (err && err != -EPROBE_DEFER)
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
- ERR_PTR(err));
- if (err)
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ netdev_dbg(priv->net_dev,
+ "could not connect to MAC\n");
+ else
+ netdev_err(priv->net_dev,
+ "Error connecting to the MAC endpoint: %pe",
+ ERR_PTR(err));
goto err_close_mac;
+ }
}
+ mutex_lock(&priv->mac_lock);
+ priv->mac = mac;
+ mutex_unlock(&priv->mac_lock);
+
return 0;
err_close_mac:
dpaa2_mac_close(mac);
- priv->mac = NULL;
err_free_mac:
kfree(mac);
return err;
@@ -4466,15 +4674,21 @@ err_free_mac:
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (dpaa2_eth_is_type_phy(priv))
- dpaa2_mac_disconnect(priv->mac);
+ struct dpaa2_mac *mac;
+
+ mutex_lock(&priv->mac_lock);
+ mac = priv->mac;
+ priv->mac = NULL;
+ mutex_unlock(&priv->mac_lock);
- if (!dpaa2_eth_has_mac(priv))
+ if (!mac)
return;
- dpaa2_mac_close(priv->mac);
- kfree(priv->mac);
- priv->mac = NULL;
+ if (dpaa2_mac_is_type_phy(mac))
+ dpaa2_mac_disconnect(mac);
+
+ dpaa2_mac_close(mac);
+ kfree(mac);
}
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
@@ -4484,6 +4698,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ bool had_mac;
int err;
err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
@@ -4500,12 +4715,15 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
dpaa2_eth_update_tx_fqids(priv);
- rtnl_lock();
- if (dpaa2_eth_has_mac(priv))
+ /* We can avoid locking because the "endpoint changed" IRQ
+	 * handler is the only one that changes priv->mac at runtime,
+ * so we are not racing with anyone.
+ */
+ had_mac = !!priv->mac;
+ if (had_mac)
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
- rtnl_unlock();
}
return IRQ_HANDLED;
@@ -4601,6 +4819,9 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv = netdev_priv(net_dev);
priv->net_dev = net_dev;
+ SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port);
+
+ mutex_init(&priv->mac_lock);
priv->iommu_domain = iommu_get_domain_for_dev(dev);
@@ -4623,10 +4844,12 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
if (err) {
- if (err == -ENXIO)
+ if (err == -ENXIO) {
+ dev_dbg(dev, "waiting for MC portal\n");
err = -EPROBE_DEFER;
- else
+ } else {
dev_err(dev, "MC portal allocation failed\n");
+ }
goto err_portal_alloc;
}
@@ -4641,7 +4864,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_eth_setup_fqs(priv);
- err = dpaa2_eth_setup_dpbp(priv);
+ err = dpaa2_eth_setup_default_dpbp(priv);
if (err)
goto err_dpbp_setup;
@@ -4707,6 +4930,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
#endif
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
err = dpaa2_eth_setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
@@ -4719,10 +4946,6 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->do_link_poll = true;
}
- err = dpaa2_eth_connect_mac(priv);
- if (err)
- goto err_connect_mac;
-
err = dpaa2_eth_dl_alloc(priv);
if (err)
goto err_dl_register;
@@ -4756,13 +4979,13 @@ err_dl_port_add:
err_dl_trap_register:
dpaa2_eth_dl_free(priv);
err_dl_register:
- dpaa2_eth_disconnect_mac(priv);
-err_connect_mac:
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
@@ -4777,7 +5000,7 @@ err_alloc_percpu_extras:
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
err_bind:
- dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
dpaa2_eth_free_dpio(priv);
err_dpio_setup:
@@ -4810,9 +5033,6 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#endif
unregister_netdev(net_dev);
- rtnl_lock();
- dpaa2_eth_disconnect_mac(priv);
- rtnl_unlock();
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
@@ -4823,6 +5043,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
else
fsl_mc_free_irqs(ls_dev);
+ dpaa2_eth_disconnect_mac(priv);
dpaa2_eth_free_rings(priv);
free_percpu(priv->fd);
free_percpu(priv->sgt_cache);
@@ -4830,7 +5051,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
- dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
if (priv->onestep_reg_base)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 447718483ef4..d56d7a13262e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
*/
#ifndef __DPAA2_ETH_H
@@ -53,6 +53,12 @@
*/
#define DPAA2_ETH_TXCONF_PER_NAPI 256
+/* Maximum number of Tx frames to be processed in a single NAPI
+ * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
+ * to maximize the throughput.
+ */
+#define DPAA2_ETH_TX_ZC_PER_NAPI DPAA2_ETH_TXCONF_PER_NAPI
+
/* Buffer quota per channel. We want to keep in check the number of ingress frames
* in flight: for small sized frames, congestion group taildrop may kick in
* first; for large sizes, Rx FQ taildrop threshold will ensure only a
@@ -109,6 +115,14 @@
#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
#define DPAA2_ETH_RX_BUF_ALIGN 64
+/* The firmware allows assigning multiple buffer pools to a single DPNI -
+ * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
+ * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBP
+ * objects: the default one plus 8 distinct buffer pools, one per queue.
+ */
+#define DPAA2_ETH_DEFAULT_BP_IDX 0
+#define DPAA2_ETH_MAX_BPS 9
+
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
* options are either 0 or 64, so we choose the latter.
@@ -122,6 +136,7 @@ enum dpaa2_eth_swa_type {
DPAA2_ETH_SWA_SINGLE,
DPAA2_ETH_SWA_SG,
DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_XSK,
DPAA2_ETH_SWA_SW_TSO,
};
@@ -144,6 +159,10 @@ struct dpaa2_eth_swa {
struct xdp_frame *xdpf;
} xdp;
struct {
+ struct xdp_buff *xdp_buff;
+ int sgt_size;
+ } xsk;
+ struct {
struct sk_buff *skb;
int num_sg;
int sgt_size;
@@ -421,12 +440,19 @@ enum dpaa2_eth_fq_type {
};
struct dpaa2_eth_priv;
+struct dpaa2_eth_channel;
+struct dpaa2_eth_fq;
struct dpaa2_eth_xdp_fds {
struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
ssize_t num;
};
+typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
@@ -439,10 +465,7 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
- void (*consume)(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- struct dpaa2_eth_fq *fq);
+ dpaa2_eth_consume_cb_t *consume;
struct dpaa2_eth_fq_stats stats;
struct dpaa2_eth_xdp_fds xdp_redirect_fds;
@@ -454,6 +477,11 @@ struct dpaa2_eth_ch_xdp {
unsigned int res;
};
+struct dpaa2_eth_bp {
+ struct fsl_mc_device *dev;
+ int bpid;
+};
+
struct dpaa2_eth_channel {
struct dpaa2_io_notification_ctx nctx;
struct fsl_mc_device *dpcon;
@@ -472,6 +500,11 @@ struct dpaa2_eth_channel {
/* Buffers to be recycled back in the buffer pool */
u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
int recycled_bufs_cnt;
+
+ bool xsk_zc;
+ int xsk_tx_pkts_sent;
+ struct xsk_buff_pool *xsk_pool;
+ struct dpaa2_eth_bp *bp;
};
struct dpaa2_eth_dist_fields {
@@ -506,7 +539,7 @@ struct dpaa2_eth_trap_data {
#define DPAA2_ETH_DEFAULT_COPYBREAK 512
-#define DPAA2_ETH_ENQUEUE_MAX_FDS 200
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 256
struct dpaa2_eth_fds {
struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
};
@@ -535,14 +568,16 @@ struct dpaa2_eth_priv {
u8 ptp_correction_off;
void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
u32 offset, u8 udp);
- struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
- u16 bpid;
struct iommu_domain *iommu_domain;
enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
bool rx_tstamp; /* Rx timestamping enabled */
+ /* Buffer pool management */
+ struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
+ int num_bps;
+
u16 tx_qdid;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
@@ -580,6 +615,8 @@ struct dpaa2_eth_priv {
#endif
struct dpaa2_mac *mac;
+ /* Serializes changes to priv->mac */
+ struct mutex mac_lock;
struct workqueue_struct *dpaa2_ptp_wq;
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
@@ -733,16 +770,15 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
{
- if (priv->mac &&
- (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
- priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
- return true;
+ lockdep_assert_held(&priv->mac_lock);
- return false;
+ return dpaa2_mac_is_type_phy(priv->mac);
}
static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
{
+ lockdep_assert_held(&priv->mac_lock);
+
return priv->mac ? true : false;
}
@@ -771,4 +807,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
struct dpaa2_fapr *fapr);
+
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
+
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, u32 fd_length,
+ void *fd_vaddr);
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, void *vaddr,
+ struct dpaa2_eth_fq *fq,
+ struct rtnl_link_stats64 *percpu_stats,
+ struct sk_buff *skb);
+
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+
+void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr);
+
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id);
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi);
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch);
+
+/* SGT (Scatter-Gather Table) cache management */
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
+
#endif	/* __DPAA2_ETH_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index eea7d7a07c00..e80e9388c71f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- * Copyright 2020 NXP
+ * Copyright 2016-2022 NXP
*/
#include <linux/net_tstamp.h>
@@ -86,11 +85,16 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_nway_reset(priv->mac->phylink);
+ err = phylink_ethtool_nway_reset(priv->mac->phylink);
+
+ mutex_unlock(&priv->mac_lock);
- return -EOPNOTSUPP;
+ return err;
}
static int
@@ -98,10 +102,18 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_ksettings_get(priv->mac->phylink,
- link_settings);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
link_settings->base.autoneg = AUTONEG_DISABLE;
if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
@@ -116,11 +128,17 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *link_settings)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ err = phylink_ethtool_ksettings_set(priv->mac->phylink,
+ link_settings);
- if (!dpaa2_eth_is_type_phy(priv))
- return -ENOTSUPP;
+ mutex_unlock(&priv->mac_lock);
- return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+ return err;
}
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
@@ -129,11 +147,16 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
+ mutex_lock(&priv->mac_lock);
+
if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ mutex_unlock(&priv->mac_lock);
return;
}
+ mutex_unlock(&priv->mac_lock);
+
pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
pause->autoneg = AUTONEG_DISABLE;
@@ -152,9 +175,17 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_set_pauseparam(priv->mac->phylink,
- pause);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
+
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -186,7 +217,6 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- struct dpaa2_eth_priv *priv = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -200,22 +230,17 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (dpaa2_eth_has_mac(priv))
- dpaa2_mac_get_strings(p);
+ dpaa2_mac_get_strings(p);
break;
}
}
static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
- int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (dpaa2_eth_has_mac(priv))
- num_ss_stats += dpaa2_mac_get_sset_count();
- return num_ss_stats;
+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS +
+ dpaa2_mac_get_sset_count();
default:
return -EOPNOTSUPP;
}
@@ -227,17 +252,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- int i = 0;
- int j, k, err;
- int num_cnt;
- union dpni_statistics dpni_stats;
- u32 fcnt, bcnt;
- u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
- u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
- u32 buf_cnt;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_eth_drv_stats *extras;
- struct dpaa2_eth_ch_stats *ch_stats;
+ union dpni_statistics dpni_stats;
int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
sizeof(dpni_stats.page_0),
sizeof(dpni_stats.page_1),
@@ -247,6 +263,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
sizeof(dpni_stats.page_5),
sizeof(dpni_stats.page_6),
};
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ struct dpaa2_eth_ch_stats *ch_stats;
+ struct dpaa2_eth_drv_stats *extras;
+ u32 buf_cnt, buf_cnt_total = 0;
+ int j, k, err, num_cnt, i = 0;
+ u32 fcnt, bcnt;
memset(data, 0,
sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
@@ -308,15 +331,22 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
*(data + i++) = fcnt_tx_total;
*(data + i++) = bcnt_tx_total;
- err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
- if (err) {
- netdev_warn(net_dev, "Buffer count query error %d\n", err);
- return;
+ for (j = 0; j < priv->num_bps; j++) {
+ err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
+ return;
+ }
+ buf_cnt_total += buf_cnt;
}
- *(data + i++) = buf_cnt;
+ *(data + i++) = buf_cnt_total;
+
+ mutex_lock(&priv->mac_lock);
if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
+
+ mutex_unlock(&priv->mac_lock);
}
static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
@@ -876,6 +906,29 @@ restore_rx_usecs:
return err;
}
+static void dpaa2_eth_get_channels(struct net_device *net_dev,
+ struct ethtool_channels *channels)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int queue_count = dpaa2_eth_queue_count(priv);
+
+ channels->max_rx = queue_count;
+ channels->max_tx = queue_count;
+ channels->rx_count = queue_count;
+ channels->tx_count = queue_count;
+
+ /* Tx confirmation and Rx error */
+ channels->max_other = queue_count + 1;
+ channels->max_combined = channels->max_rx +
+ channels->max_tx +
+ channels->max_other;
+ /* Tx conf and Rx err */
+ channels->other_count = queue_count + 1;
+ channels->combined_count = channels->rx_count +
+ channels->tx_count +
+ channels->other_count;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -896,4 +949,5 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.set_tunable = dpaa2_eth_set_tunable,
.get_coalesce = dpaa2_eth_get_coalesce,
.set_coalesce = dpaa2_eth_set_coalesce,
+ .get_channels = dpaa2_eth_get_channels,
};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 49ff85633783..c886f33f8c6f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -105,6 +105,7 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
* thus the fwnode field is not yet set. Defer probe if we are
* facing this situation.
*/
+ dev_dbg(dev, "dprc not finished probing\n");
return ERR_PTR(-EPROBE_DEFER);
}
@@ -235,7 +236,6 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = dpaa2_mac_select_pcs,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
@@ -264,8 +264,10 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
mdiodev = fwnode_mdio_find_device(node);
fwnode_handle_put(node);
- if (!mdiodev)
+ if (!mdiodev) {
+ netdev_dbg(mac->net_dev, "missing PCS device\n");
return -EPROBE_DEFER;
+ }
mac->pcs = lynx_pcs_create(mdiodev);
if (!mac->pcs) {
@@ -336,12 +338,20 @@ static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
void dpaa2_mac_start(struct dpaa2_mac *mac)
{
+ ASSERT_RTNL();
+
if (mac->serdes_phy)
phy_power_on(mac->serdes_phy);
+
+ phylink_start(mac->phylink);
}
void dpaa2_mac_stop(struct dpaa2_mac *mac)
{
+ ASSERT_RTNL();
+
+ phylink_stop(mac->phylink);
+
if (mac->serdes_phy)
phy_power_off(mac->serdes_phy);
}
@@ -420,7 +430,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
}
mac->phylink = phylink;
+ rtnl_lock();
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
+ rtnl_unlock();
if (err) {
netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
goto err_phylink_destroy;
@@ -438,10 +450,10 @@ err_pcs_destroy:
void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
{
- if (!mac->phylink)
- return;
-
+ rtnl_lock();
phylink_disconnect_phy(mac->phylink);
+ rtnl_unlock();
+
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
of_phy_put(mac->serdes_phy);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index a58cab188a99..c1ec9efd413a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -30,8 +30,14 @@ struct dpaa2_mac {
struct phy *serdes_phy;
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io);
+static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac)
+{
+ if (!mac)
+ return false;
+
+ return mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE;
+}
int dpaa2_mac_open(struct dpaa2_mac *mac);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index c8cb541572ff..90d23ab1ce9d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/msi.h>
#include <linux/fsl/mc.h>
#include "dpaa2-ptp.h"
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
index 720c9230cab5..6bc1988be311 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -60,11 +60,18 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct dpsw_link_state state = {0};
- int err = 0;
+ int err;
+
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+ link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
+ return err;
+ }
- if (dpaa2_switch_port_is_type_phy(port_priv))
- return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
- link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
@@ -99,9 +106,16 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
bool if_running;
int err = 0, ret;
- if (dpaa2_switch_port_is_type_phy(port_priv))
- return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
- link_ksettings);
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+ link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&port_priv->mac_lock);
/* Interface needs to be down to change link settings */
if_running = netif_running(netdev);
@@ -145,14 +159,9 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
static int
dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS;
-
switch (sset) {
case ETH_SS_STATS:
- if (port_priv->mac)
- num_ss_stats += dpaa2_mac_get_sset_count();
- return num_ss_stats;
+ return DPAA2_SWITCH_NUM_COUNTERS + dpaa2_mac_get_sset_count();
default:
return -EOPNOTSUPP;
}
@@ -161,7 +170,6 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -172,8 +180,7 @@ static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (port_priv->mac)
- dpaa2_mac_get_strings(p);
+ dpaa2_mac_get_strings(p);
break;
}
}
@@ -196,8 +203,12 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
dpaa2_switch_ethtool_counters[i].name, err);
}
- if (port_priv->mac)
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_has_mac(port_priv))
dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
+
+ mutex_unlock(&port_priv->mac_lock);
}
const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 2b5909fa93cf..f4ae4289c41a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
@@ -603,8 +602,11 @@ static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
+ * We can avoid locking because we are called from the "link changed"
+ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
+ * (the writer to port_priv->mac), so we cannot race with it.
*/
- if (dpaa2_switch_port_is_type_phy(port_priv))
+ if (dpaa2_mac_is_type_phy(port_priv->mac))
return 0;
/* Interrupts are received even though no one issued an 'ifconfig up'
@@ -684,6 +686,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
+ mutex_lock(&port_priv->mac_lock);
+
if (!dpaa2_switch_port_is_type_phy(port_priv)) {
/* Explicitly set carrier off, otherwise
* netif_carrier_ok() will return true and cause 'ip link show'
@@ -697,16 +701,17 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
port_priv->ethsw_data->dpsw_handle,
port_priv->idx);
if (err) {
+ mutex_unlock(&port_priv->mac_lock);
netdev_err(netdev, "dpsw_if_enable err %d\n", err);
return err;
}
dpaa2_switch_enable_ctrl_if_napi(ethsw);
- if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ if (dpaa2_switch_port_is_type_phy(port_priv))
dpaa2_mac_start(port_priv->mac);
- phylink_start(port_priv->mac->phylink);
- }
+
+ mutex_unlock(&port_priv->mac_lock);
return 0;
}
@@ -717,14 +722,17 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
+ mutex_lock(&port_priv->mac_lock);
+
if (dpaa2_switch_port_is_type_phy(port_priv)) {
- phylink_stop(port_priv->mac->phylink);
dpaa2_mac_stop(port_priv->mac);
} else {
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
}
+ mutex_unlock(&port_priv->mac_lock);
+
err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
port_priv->idx);
@@ -1453,9 +1461,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
err = dpaa2_mac_open(mac);
if (err)
goto err_free_mac;
- port_priv->mac = mac;
- if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ if (dpaa2_mac_is_type_phy(mac)) {
err = dpaa2_mac_connect(mac);
if (err) {
netdev_err(port_priv->netdev,
@@ -1465,11 +1472,14 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
}
}
+ mutex_lock(&port_priv->mac_lock);
+ port_priv->mac = mac;
+ mutex_unlock(&port_priv->mac_lock);
+
return 0;
err_close_mac:
dpaa2_mac_close(mac);
- port_priv->mac = NULL;
err_free_mac:
kfree(mac);
return err;
@@ -1477,15 +1487,21 @@ err_free_mac:
static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
{
- if (dpaa2_switch_port_is_type_phy(port_priv))
- dpaa2_mac_disconnect(port_priv->mac);
+ struct dpaa2_mac *mac;
+
+ mutex_lock(&port_priv->mac_lock);
+ mac = port_priv->mac;
+ port_priv->mac = NULL;
+ mutex_unlock(&port_priv->mac_lock);
- if (!dpaa2_switch_port_has_mac(port_priv))
+ if (!mac)
return;
- dpaa2_mac_close(port_priv->mac);
- kfree(port_priv->mac);
- port_priv->mac = NULL;
+ if (dpaa2_mac_is_type_phy(mac))
+ dpaa2_mac_disconnect(mac);
+
+ dpaa2_mac_close(mac);
+ kfree(mac);
}
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
@@ -1495,6 +1511,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
struct ethsw_port_priv *port_priv;
u32 status = ~0;
int err, if_id;
+ bool had_mac;
err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, &status);
@@ -1512,12 +1529,15 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
}
if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
- rtnl_lock();
- if (dpaa2_switch_port_has_mac(port_priv))
+ /* We can avoid locking because the "endpoint changed" IRQ
+		 * handler is the only one that changes port_priv->mac at
+		 * runtime, so we are not racing with anyone.
+ */
+ had_mac = !!port_priv->mac;
+ if (had_mac)
dpaa2_switch_port_disconnect_mac(port_priv);
else
dpaa2_switch_port_connect_mac(port_priv);
- rtnl_unlock();
}
out:
@@ -2935,9 +2955,7 @@ static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
{
struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
- rtnl_lock();
dpaa2_switch_port_disconnect_mac(port_priv);
- rtnl_unlock();
free_netdev(port_priv->netdev);
ethsw->ports[port_idx] = NULL;
}
@@ -3256,6 +3274,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
port_priv->netdev = port_netdev;
port_priv->ethsw_data = ethsw;
+ mutex_init(&port_priv->mac_lock);
+
port_priv->idx = port_idx;
port_priv->stp_state = BR_STATE_FORWARDING;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
index 0002dca4d417..42b3ca73f55d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -161,6 +161,8 @@ struct ethsw_port_priv {
struct dpaa2_switch_filter_block *filter_block;
struct dpaa2_mac *mac;
+ /* Protects against changes to port_priv->mac */
+ struct mutex mac_lock;
};
/* Switch data */
@@ -230,12 +232,7 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
static inline bool
dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
{
- if (port_priv->mac &&
- (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
- port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
- return true;
-
- return false;
+ return dpaa2_mac_is_type_phy(port_priv->mac);
}
static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
new file mode 100644
index 000000000000..051748b997f3
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2022 NXP
+ */
+#include <linux/filter.h>
+#include <linux/compiler.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
+
+#include "dpaa2-eth.h"
+
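+/* Point the Rx consume callback of all frame queues of the given type that
+ * are serviced by this channel to the requested function. Used to switch a
+ * queue between the regular and the XSK zero-copy Rx path.
+ */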
+static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ enum dpaa2_eth_fq_type type,
+ dpaa2_eth_consume_cb_t *consume)
+{
+ struct dpaa2_eth_fq *fq;
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+
+ if (fq->type != type)
+ continue;
+ if (fq->channel != ch)
+ continue;
+
+ fq->consume = consume;
+ }
+}
+
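+/* Run the XDP program on a frame received on an AF_XDP zero-copy queue.
+ * XDP_REDIRECT is treated as the likely verdict; on a redirect error or a
+ * drop/aborted verdict, the buffer is recycled back into the pool.
+ */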
+static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff *xdp_buff;
+ struct dpaa2_eth_swa *swa;
+ u32 xdp_act = XDP_PASS;
+ int err;
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
+ swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE +
+ ch->xsk_pool->umem->headroom);
+ xdp_buff = swa->xsk.xdp_buff;
+
+ xdp_buff->data_hard_start = vaddr;
+ xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd);
+ xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd);
+ xdp_set_data_meta_invalid(xdp_buff);
+ xdp_buff->rxq = &ch->xdp_rxq;
+
+ xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
+ xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr);
+ dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data);
+
+ if (likely(xdp_act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog);
+ if (unlikely(err)) {
+ ch->stats.xdp_drop++;
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ } else {
+ ch->buf_count--;
+ ch->stats.xdp_redirect++;
+ }
+
+ goto xdp_redir;
+ }
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ }
+
+xdp_redir:
+ ch->xdp.res |= xdp_act;
+out:
+ return xdp_act;
+}
+
+/* Rx frame processing routine for the AF_XDP fast path */
+static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct sk_buff *skb;
+ void *vaddr;
+ u32 xdp_act;
+
+ trace_dpaa2_rx_xsk_fd(priv->net_dev, fd);
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ if (fd_format != dpaa2_fd_single) {
+ WARN_ON(priv->xdp_prog);
+ /* AF_XDP doesn't support any other formats */
+ goto err_frame_format;
+ }
+
+ xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ /* Build skb */
+ skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr);
+ if (!skb)
+ /* Nothing else we can do, recycle the buffer and
+ * drop the frame.
+ */
+ goto err_alloc_skb;
+
+ /* Send the skb to the Linux networking stack */
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+
+ return;
+
+err_alloc_skb:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv,
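+/* Build the DPNI pools configuration so that each queue (QDBIN) draws from
+ * the buffer pool of its channel: for every DPBP in use, set one bit in the
+ * priority mask for each channel that points to it.
+ */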
+ struct dpni_pools_cfg *pools_params)
+{
+ int curr_bp = 0, i, j;
+
+ pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN;
+ for (i = 0; i < priv->num_bps; i++) {
+ for (j = 0; j < priv->num_channels; j++)
+ if (priv->bp[i] == priv->channel[j]->bp)
+ pools_params->pools[curr_bp].priority_mask |= (1 << j);
+ if (!pools_params->pools[curr_bp].priority_mask)
+ continue;
+
+ pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
+ pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
+ pools_params->pools[curr_bp++].backup_pool = 0;
+ }
+ pools_params->num_dpbp = curr_bp;
+}
+
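+/* Tear down AF_XDP zero-copy on a queue: unmap the UMEM, revert the rxq
+ * memory model to plain pages, free the per-channel DPBP and fall back to
+ * the default buffer pool, then restore the regular Rx consume callback.
+ * The interface is closed and reopened around the reconfiguration if it
+ * was running.
+ */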
+static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpni_pools_cfg pools_params = { 0 };
+ struct dpaa2_eth_channel *ch;
+ int err;
+ bool up;
+
+ ch = priv->channel[qid];
+ if (!ch->xsk_pool)
+ return -EINVAL;
+
+ up = netif_running(dev);
+ if (up)
+ dev_close(dev);
+
+ xsk_pool_dma_unmap(pool, 0);
+ err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err)
+		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n",
+ err);
+
+ dpaa2_eth_free_dpbp(priv, ch->bp);
+
+ ch->xsk_zc = false;
+ ch->xsk_pool = NULL;
+ ch->xsk_tx_pkts_sent = 0;
+ ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
+
+ dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx);
+
+ dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err)
+ netdev_err(dev, "dpni_set_pools() failed\n");
+
+ if (up) {
+ err = dev_open(dev, NULL);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
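+/* Set up AF_XDP zero-copy on a queue: DMA-map the UMEM, switch the rxq
+ * memory model to MEM_TYPE_XSK_BUFF_POOL, allocate a dedicated DPBP for the
+ * channel and re-associate buffer pools per QDBIN. The interface is closed
+ * and reopened around the reconfiguration if it was running.
+ */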
+static int dpaa2_xsk_enable_pool(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpni_pools_cfg pools_params = { 0 };
+ struct dpaa2_eth_channel *ch;
+ int err, err2;
+ bool up;
+
+ if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) {
+		netdev_err(dev, "AF_XDP zero-copy not supported on devices with WRIOP version < 3.0.0\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->dpni_attrs.num_queues > 8) {
+		netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more than 8 queues\n");
+ return -EOPNOTSUPP;
+ }
+
+ up = netif_running(dev);
+ if (up)
+ dev_close(dev);
+
+ err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0);
+ if (err) {
+ netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n",
+ err);
+ goto err_dma_unmap;
+ }
+
+ ch = priv->channel[qid];
+ err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err) {
+ netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err);
+ goto err_mem_model;
+ }
+ xsk_pool_set_rxq_info(pool, &ch->xdp_rxq);
+
+ priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv);
+ if (IS_ERR(priv->bp[priv->num_bps])) {
+ err = PTR_ERR(priv->bp[priv->num_bps]);
+ goto err_bp_alloc;
+ }
+ ch->xsk_zc = true;
+ ch->xsk_pool = pool;
+ ch->bp = priv->bp[priv->num_bps++];
+
+ dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx);
+
+ dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err) {
+ netdev_err(dev, "dpni_set_pools() failed\n");
+ goto err_set_pools;
+ }
+
+ if (up) {
+ err = dev_open(dev, NULL);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+err_set_pools:
+ err2 = dpaa2_xsk_disable_pool(dev, qid);
+ if (err2)
+ netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2);
+err_bp_alloc:
+ err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err2)
+		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err2);
+err_mem_model:
+ xsk_pool_dma_unmap(pool, 0);
+err_dma_unmap:
+ if (up)
+ dev_open(dev, NULL);
+
+ return err;
+}
+
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
+{
+ return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) :
+ dpaa2_xsk_disable_pool(dev, qid);
+}
+
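+/* ndo_xsk_wakeup callback: kick the NAPI instance of the channel backing
+ * this queue so that pending XSK Tx descriptors get processed.
+ */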
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch = priv->channel[qid];
+
+ if (!priv->link_state.up)
+ return -ENETDOWN;
+
+ if (!priv->xdp_prog)
+ return -EINVAL;
+
+ if (!ch->xsk_zc)
+ return -EINVAL;
+
+ /* We do not have access to a per channel SW interrupt, so instead we
+ * schedule a NAPI instance.
+ */
+ if (!napi_if_scheduled_mark_missed(&ch->napi))
+ napi_schedule(&ch->napi);
+
+ return 0;
+}
+
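+/* Build a frame descriptor for one XSK Tx descriptor. The frame is wrapped
+ * in a single-entry scatter-gather table so that the SGT buffer can also
+ * carry the software annotation consumed at Tx confirmation time.
+ */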
+static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ struct xdp_desc *xdp_desc)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ void *sgt_buf = NULL;
+ dma_addr_t sgt_addr;
+ int sgt_buf_size;
+ dma_addr_t addr;
+ int err = 0;
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf))
+ return -ENOMEM;
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Get the address of the XSK Tx buffer */
+ addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr);
+ xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len);
+
+ /* Fill in the HW SGT structure */
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, xdp_desc->len);
+ dpaa2_sg_set_final(sgt, true);
+
+ /* Store the necessary info in the SGT buffer */
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_XSK;
+ swa->xsk.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ err = -ENOMEM;
+ goto sgt_map_failed;
+ }
+
+ /* Initialize FD fields */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, xdp_desc->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+
+sgt_map_failed:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+
+ return err;
+}
+
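+/* Dequeue at most DPAA2_ETH_TX_ZC_PER_NAPI descriptors from the XSK Tx ring,
+ * build an FD for each one and enqueue them. Returns true when the whole
+ * budget was consumed, so the NAPI poller knows there may be more work.
+ */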
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
+{
+ struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ int budget = DPAA2_ETH_TX_ZC_PER_NAPI;
+ int total_enqueued, enqueued;
+ int retries, max_retries;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int batch, i, err;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fds = (this_cpu_ptr(priv->fd))->array;
+
+ /* Use the FQ with the same idx as the affine CPU */
+ fq = &priv->fq[ch->nctx.desired_cpu];
+
+ batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);
+ if (!batch)
+ return false;
+
+ /* Create a FD for each XSK frame to be sent */
+ for (i = 0; i < batch; i++) {
+ err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]);
+ if (err) {
+ batch = i;
+ break;
+ }
+
+ trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]);
+ }
+
+ /* Enqueue all the created FDs */
+ max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;
+ total_enqueued = 0;
+ enqueued = 0;
+ retries = 0;
+ while (total_enqueued < batch && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,
+ batch - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
+ }
+ percpu_extras->tx_portal_busy += retries;
+
+ /* Update statistics */
+ percpu_stats->tx_packets += total_enqueued;
+ for (i = 0; i < total_enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ for (i = total_enqueued; i < batch; i++) {
+ dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false);
+ percpu_stats->tx_errors++;
+ }
+
+ xsk_tx_release(ch->xsk_pool);
+
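+ /* Report whether the Tx budget was fully used, i.e. whether more XSK
+ * frames may still be pending for the next NAPI poll.
+ */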
+ return total_enqueued == budget;
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 828f538097af..be9492b8d5dc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -13,10 +13,12 @@
#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMD_2ND_VERSION 2
+#define DPNI_CMD_3RD_VERSION 3
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION)
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
@@ -39,7 +41,7 @@
#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
-#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V3(0x200)
#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
@@ -115,14 +117,19 @@ struct dpni_cmd_open {
};
#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+
+struct dpni_cmd_pool {
+ __le16 dpbp_id;
+ u8 priority_mask;
+ u8 pad;
+};
+
struct dpni_cmd_set_pools {
- /* cmd word 0 */
u8 num_dpbp;
u8 backup_pool_mask;
- __le16 pad;
- /* cmd word 0..4 */
- __le32 dpbp_id[DPNI_MAX_DPBP];
- /* cmd word 4..6 */
+ u8 pad;
+ u8 pool_options;
+ struct dpni_cmd_pool pool[DPNI_MAX_DPBP];
__le16 buffer_size[DPNI_MAX_DPBP];
};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 6c3b36f20fb8..02601a283b59 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -173,8 +173,12 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
+ cmd_params->pool_options = cfg->pool_options;
for (i = 0; i < DPNI_MAX_DPBP; i++) {
- cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].dpbp_id =
+ cpu_to_le16(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].priority_mask =
+ cfg->pools[i].priority_mask;
cmd_params->buffer_size[i] =
cpu_to_le16(cfg->pools[i].buffer_size);
cmd_params->backup_pool_mask |=
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 6fffd519aa00..5c0a1d5ac934 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -92,19 +92,28 @@ int dpni_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
+#define DPNI_POOL_ASSOC_QPRI 0
+#define DPNI_POOL_ASSOC_QDBIN 1
+
/**
* struct dpni_pools_cfg - Structure representing buffer pools configuration
* @num_dpbp: Number of DPBPs
+ * @pool_options: Buffer assignment options.
+ * This field is a combination of the DPNI_POOL_ASSOC_* flags
* @pools: Array of buffer pools parameters; The number of valid entries
* must match 'num_dpbp' value
* @pools.dpbp_id: DPBP object ID
+ * @pools.priority_mask: Priority mask indicating the TCs that use this buffer
+ * pool. If set to 0x00, MC will assume the value 0xff.
* @pools.buffer_size: Buffer size
* @pools.backup_pool: Backup pool
*/
struct dpni_pools_cfg {
u8 num_dpbp;
+ u8 pool_options;
struct {
int dpbp_id;
+ u8 priority_mask;
u16 buffer_size;
int backup_pool;
} pools[DPNI_MAX_DPBP];
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index bdf94335ee99..9f6c4f5c0a6c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1111,7 +1111,6 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = enetc_pl_mac_select_pcs,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 33f84a30e167..5ba1e0d71c68 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -348,7 +348,6 @@ struct bufdesc_ex {
*/
#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
-
#define FEC_ENET_RX_PAGES 256
#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -527,6 +526,19 @@ struct fec_enet_priv_txrx_info {
struct sk_buff *skb;
};
+enum {
+ RX_XDP_REDIRECT = 0,
+ RX_XDP_PASS,
+ RX_XDP_DROP,
+ RX_XDP_TX,
+ RX_XDP_TX_ERRORS,
+ TX_XDP_XMIT,
+ TX_XDP_XMIT_ERRORS,
+
+ /* The following must be the last one */
+ XDP_STATS_TOTAL,
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -547,6 +559,7 @@ struct fec_enet_priv_rx_q {
/* page_pool */
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
+ u32 stats[XDP_STATS_TOTAL];
/* rx queue number, in the range 0-7 */
u8 id;
@@ -658,9 +671,14 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+ struct hrtimer perout_timer;
+ u64 perout_stime;
struct imx_sc_ipc *ipc_handle;
+ /* XDP BPF Program */
+ struct bpf_prog *xdp_prog;
+
u64 ethtool_stats[];
};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 23e1a94b9ce4..5528b0af82ae 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,6 +89,11 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
#define FEC_ENET_OPD_V 0xFFF0
#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
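+/* Result flags returned by fec_enet_run_xdp() and OR-ed into xdp_result */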
+#define FEC_ENET_XDP_PASS 0
+#define FEC_ENET_XDP_CONSUMED BIT(0)
+#define FEC_ENET_XDP_TX BIT(1)
+#define FEC_ENET_XDP_REDIR BIT(2)
+
struct fec_devinfo {
u32 quirks;
};
@@ -365,16 +370,6 @@ static void swap_buffer(void *bufaddr, int len)
swab32s(buf);
}
-static void swap_buffer2(void *dst_buf, void *src_buf, int len)
-{
- int i;
- unsigned int *src = src_buf;
- unsigned int *dst = dst_buf;
-
- for (i = 0; i < len; i += 4, src++, dst++)
- *dst = swab32p(src);
-}
-
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -428,13 +423,14 @@ static int
fec_enet_create_page_pool(struct fec_enet_private *fep,
struct fec_enet_priv_rx_q *rxq, int size)
{
+ struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
struct page_pool_params pp_params = {
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = dev_to_node(&fep->pdev->dev),
.dev = &fep->pdev->dev,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = FEC_ENET_XDP_HEADROOM,
.max_len = FEC_ENET_RX_FRSIZE,
};
@@ -1494,53 +1490,6 @@ static void fec_enet_tx(struct net_device *ndev)
fec_enet_tx_queue(ndev, i);
}
-static int __maybe_unused
-fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- int off;
-
- off = ((unsigned long)skb->data) & fep->rx_align;
- if (off)
- skb_reserve(skb, fep->rx_align + 1 - off);
-
- bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
- if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
- if (net_ratelimit())
- netdev_err(ndev, "Rx DMA memory map failed\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static bool __maybe_unused
-fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length, bool swap)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct sk_buff *new_skb;
-
- if (length > fep->rx_copybreak)
- return false;
-
- new_skb = netdev_alloc_skb(ndev, length);
- if (!new_skb)
- return false;
-
- dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- if (!swap)
- memcpy(new_skb->data, (*skb)->data, length);
- else
- swap_buffer2(new_skb->data, (*skb)->data, length);
- *skb = new_skb;
-
- return true;
-}
-
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
@@ -1556,6 +1505,62 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
}
+static u32
+fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
+ struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+{
+ unsigned int sync, len = xdp->data_end - xdp->data;
+ u32 ret = FEC_ENET_XDP_PASS;
+ struct page *page;
+ int err;
+ u32 act;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* Due to xdp_adjust_tail, the DMA sync for_device length must cover the max length touched by the CPU */
+ sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+ sync = max(sync, len);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ ret = FEC_ENET_XDP_PASS;
+ break;
+
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(fep->netdev, xdp, prog);
+ if (!err) {
+ ret = FEC_ENET_XDP_REDIR;
+ } else {
+ ret = FEC_ENET_XDP_CONSUMED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ }
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+ fallthrough;
+
+ case XDP_TX:
+ bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+ fallthrough;
+
+ case XDP_ABORTED:
+ fallthrough; /* handle aborts by dropping packet */
+
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+ ret = FEC_ENET_XDP_CONSUMED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ break;
+ }
+
+ return ret;
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -1577,7 +1582,22 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
u16 vlan_tag;
int index = 0;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+ u32 ret, xdp_result = FEC_ENET_XDP_PASS;
+ u32 data_start = FEC_ENET_XDP_HEADROOM;
+ struct xdp_buff xdp;
struct page *page;
+ u32 sub_len = 4;
+
+#if !defined(CONFIG_M5272)
+ /* If the FEC_QUIRK_HAS_RACC quirk is present, the FEC_RACC_SHIFT16 bit
+ * is set by default in the probe function.
+ */
+ if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+ data_start += 2;
+ sub_len += 2;
+ }
+#endif
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1588,6 +1608,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
+ xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1637,23 +1658,31 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
prefetch(page_address(page));
fec_enet_update_cbd(rxq, bdp, index);
+ if (xdp_prog) {
+ xdp_buff_clear_frags_flag(&xdp);
+ /* subtract the 16-bit shift and the FCS */
+ xdp_prepare_buff(&xdp, page_address(page),
+ data_start, pkt_len - sub_len, false);
+ ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+ xdp_result |= ret;
+ if (ret != FEC_ENET_XDP_PASS)
+ goto rx_processing_done;
+ }
+
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
skb = build_skb(page_address(page), PAGE_SIZE);
- skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
- skb_put(skb, pkt_len - 4);
+ skb_reserve(skb, data_start);
+ skb_put(skb, pkt_len - sub_len);
skb_mark_for_recycle(skb);
- data = skb->data;
- if (need_swap)
+ if (unlikely(need_swap)) {
+ data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
-
-#if !defined(CONFIG_M5272)
- if (fep->quirks & FEC_QUIRK_HAS_RACC)
- data = skb_pull_inline(skb, 2);
-#endif
+ }
+ data = skb->data;
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1732,6 +1761,10 @@ rx_processing_done:
writel(0, rxq->bd.reg_desc_active);
}
rxq->bd.cur = bdp;
+
+ if (xdp_result & FEC_ENET_XDP_REDIR)
+ xdp_do_flush_map();
+
return pkt_received;
}
@@ -2226,7 +2259,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- phy_dev->mac_managed_pm = 1;
+ phy_dev->mac_managed_pm = true;
phy_attached_info(phy_dev);
@@ -2671,6 +2704,16 @@ static const struct fec_stat {
#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
+static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
+ "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
+ "rx_xdp_pass", /* RX_XDP_PASS, */
+ "rx_xdp_drop", /* RX_XDP_DROP, */
+ "rx_xdp_tx", /* RX_XDP_TX, */
+ "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
+ "tx_xdp_xmit", /* TX_XDP_XMIT, */
+ "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
+};
+
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
@@ -2680,6 +2723,40 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev)
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}
+static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
+{
+ u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
+ struct fec_enet_priv_rx_q *rxq;
+ int i, j;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ xdp_stats[j] += rxq->stats[j];
+ }
+
+ memcpy(data, xdp_stats, sizeof(xdp_stats));
+}
+
+static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ if (!rxq->page_pool)
+ continue;
+
+ page_pool_get_stats(rxq->page_pool, &stats);
+ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void fec_enet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -2689,6 +2766,12 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
fec_enet_update_ethtool_stats(dev);
memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+ data += FEC_STATS_SIZE / sizeof(u64);
+
+ fec_enet_get_xdp_stats(fep, data);
+ data += XDP_STATS_TOTAL;
+
+ fec_enet_page_pool_stats(fep, data);
}
static void fec_enet_get_strings(struct net_device *netdev,
@@ -2697,9 +2780,16 @@ static void fec_enet_get_strings(struct net_device *netdev,
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- fec_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
+ memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
+ strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ page_pool_ethtool_stats_get_strings(data);
+
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -2709,9 +2799,14 @@ static void fec_enet_get_strings(struct net_device *netdev,
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
+ int count;
+
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(fec_stats);
+ count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+
case ETH_SS_TEST:
return net_selftest_get_count();
default:
@@ -2722,7 +2817,8 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- int i;
+ struct fec_enet_priv_rx_q *rxq;
+ int i, j;
/* Disable MIB statistics counters */
writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
@@ -2730,6 +2826,12 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
writel(0, fep->hwp + fec_stats[i].offset);
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ rxq->stats[j] = 0;
+ }
+
/* Don't disable MIB statistics counters */
writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}
@@ -3083,6 +3185,9 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (i = 0; i < rxq->bd.ring_size; i++)
page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+ for (i = 0; i < XDP_STATS_TOTAL; i++)
+ rxq->stats[i] = 0;
+
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
xdp_rxq_info_unreg(&rxq->xdp_rxq);
page_pool_destroy(rxq->page_pool);
@@ -3562,6 +3667,150 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}
+static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ bool is_run = netif_running(dev);
+ struct bpf_prog *old_prog;
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ /* No need to support SoCs that require frame swapping, because the
+ * performance would not be better than skb mode.
+ */
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ return -EOPNOTSUPP;
+
+ if (is_run) {
+ napi_disable(&fep->napi);
+ netif_tx_disable(dev);
+ }
+
+ old_prog = xchg(&fep->xdp_prog, bpf->prog);
+ fec_restart(dev);
+
+ if (is_run) {
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(dev);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+
+ case XDP_SETUP_XSK_POOL:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
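+/* Map the current CPU onto one of the available Tx queues for XDP xmit */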
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+ if (unlikely(index < 0))
+ return 0;
+
+ return (index % fep->num_tx_queues);
+}
+
+static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq,
+ struct xdp_frame *frame)
+{
+ unsigned int index, status, estatus;
+ struct bufdesc *bdp, *last_bdp;
+ dma_addr_t dma_addr;
+ int entries_free;
+
+ entries_free = fec_enet_get_free_txdesc_num(txq);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = txq->bd.cur;
+ last_bdp = bdp;
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+
+ dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
+ frame->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+ return FEC_ENET_XDP_CONSUMED;
+
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex)
+ estatus = BD_ENET_TX_INT;
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
+ bdp->cbd_datlen = cpu_to_fec16(frame->len);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ index = fec_enet_get_bd_index(last_bdp, &txq->bd);
+ txq->tx_skbuff[index] = NULL;
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+
+ txq->bd.cur = bdp;
+
+ return 0;
+}
+
+static int fec_enet_xdp_xmit(struct net_device *dev,
+ int num_frames,
+ struct xdp_frame **frames,
+ u32 flags)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_tx_q *txq;
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ unsigned int queue;
+ int i;
+
+ queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+
+ __netif_tx_lock(nq, cpu);
+
+ for (i = 0; i < num_frames; i++)
+ fec_enet_txq_xmit_frame(fep, txq, frames[i]);
+
+ /* Make sure the updates to bdp and tx_skbuff are performed. */
+ wmb();
+
+ /* Trigger transmission start */
+ writel(0, txq->bd.reg_desc_active);
+
+ __netif_tx_unlock(nq);
+
+ return num_frames;
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -3576,6 +3825,8 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_poll_controller = fec_poll_controller,
#endif
.ndo_set_features = fec_set_features,
+ .ndo_bpf = fec_enet_bpf,
+ .ndo_xdp_xmit = fec_enet_xdp_xmit,
};
static const unsigned short offset_des_active_rxq[] = {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index cffd9ad499dd..ab86bb8562ef 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -88,6 +88,9 @@
#define FEC_CHANNLE_0 0
#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
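+/* The FEC timer counter is only 31 bits wide (~2.1s worth of ns). The perout
+ * output toggles every half period, so periods of up to 4s can be programmed.
+ */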
+#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
+#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
+
/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
@@ -198,6 +201,78 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
return 0;
}
+static int fec_ptp_pps_perout(struct fec_enet_private *fep)
+{
+ u32 compare_val, ptp_hc, temp_val;
+ u64 curr_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* Update time counter */
+ timecounter_read(&fep->tc);
+
+ /* Get the current ptp hardware time counter */
+ temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+ temp_val |= FEC_T_CTRL_CAPTURE;
+ writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ ptp_hc = readl(fep->hwp + FEC_ATIME);
+
+ /* Convert the ptp local counter to 1588 timestamp */
+ curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
+
+ /* If the PPS start time is less than the current time plus 100ms, just
+ * return: the software might not manage to write the comparison time
+ * into the FEC_TCCR register in time and would miss the start time.
+ */
+ if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+ dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return -1;
+ }
+
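+ /* Translate the absolute 1588 start time into a raw counter compare
+ * value: the offset from the current time, added to the captured
+ * hardware counter.
+ */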
+ compare_val = fep->perout_stime - curr_time + ptp_hc;
+ compare_val &= fep->cc.mask;
+
+ writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;
+
+ /* Enable compare event when overflow */
+ temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+ temp_val |= FEC_T_CTRL_PINPER;
+ writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+
+ /* Compare channel setting. */
+ temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+ temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
+ temp_val &= ~(FEC_T_TMODE_MASK);
+ temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
+ writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* Write the second compare event timestamp and calculate
+ * the third timestamp. Refer to the TCCR register details in the spec.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
+{
+ struct fec_enet_private *fep = container_of(timer,
+ struct fec_enet_private, perout_timer);
+
+ fec_ptp_pps_perout(fep);
+
+ return HRTIMER_NORESTART;
+}
+
/**
* fec_ptp_read - read raw cycle counter (to be used by time counter)
* @cc: the cyclecounter structure
@@ -263,18 +338,21 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
}
/**
- * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * fec_ptp_adjfine - adjust ptp cycle frequency
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
* Adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * indicated amount from the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
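+ * For example, a scaled_ppm value of 65536 corresponds to 1 ppm.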
*
* Because ENET hardware frequency adjust is complex,
* using software method to do that.
*/
-static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
unsigned long flags;
int neg_adj = 0;
u32 i, tmp;
@@ -425,6 +503,17 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
return 0;
}
+static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ writel(0, fep->hwp + FEC_TCSR(channel));
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
/**
* fec_ptp_enable
* @ptp: the ptp clock structure
@@ -437,14 +526,84 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
{
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
+ ktime_t timeout;
+ struct timespec64 start_time, period;
+ u64 curr_time, delta, period_ns;
+ unsigned long flags;
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
ret = fec_ptp_enable_pps(fep, on);
return ret;
+ } else if (rq->type == PTP_CLK_REQ_PEROUT) {
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index != DEFAULT_PPS_CHANNEL)
+ return -EOPNOTSUPP;
+
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ period.tv_sec = rq->perout.period.sec;
+ period.tv_nsec = rq->perout.period.nsec;
+ period_ns = timespec64_to_ns(&period);
+
+ /* The FEC PTP timer only has 31 bits, so periods longer than 4s are
+ * not supported.
+ */
+ if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
+ dev_err(&fep->pdev->dev, "The period must equal to or less than 4s!\n");
+ return -EOPNOTSUPP;
+ }
+
+ fep->reload_period = div_u64(period_ns, 2);
+ if (on && fep->reload_period) {
+ /* Convert 1588 timestamp to ns */
+ start_time.tv_sec = rq->perout.start.sec;
+ start_time.tv_nsec = rq->perout.start.nsec;
+ fep->perout_stime = timespec64_to_ns(&start_time);
+
+ mutex_lock(&fep->ptp_clk_mutex);
+ if (!fep->ptp_clk_on) {
+ dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ /* Read current timestamp */
+ curr_time = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ /* Calculate time difference */
+ delta = fep->perout_stime - curr_time;
+
+ if (fep->perout_stime <= curr_time) {
+ dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
+ return -EINVAL;
+ }
+
+ /* Because the FEC timer counter is only 31 bits wide, only the low
+ * 31 bits of the time comparison register FEC_TCCR can be set. If the
+ * PPS start time exceeds the current time by more than 0x80000000 ns,
+ * a software timer is used; it expires about 1 second before the start
+ * time so that FEC_TCCR can still be set in time.
+ */
+ if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
+ timeout = ns_to_ktime(delta - NSEC_PER_SEC);
+ hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
+ } else {
+ return fec_ptp_pps_perout(fep);
+ }
+ } else {
+ fec_ptp_pps_disable(fep, fep->pps_channel);
+ }
+
+ return 0;
+ } else {
+ return -EOPNOTSUPP;
}
- return -EOPNOTSUPP;
}
/**
@@ -583,10 +742,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
- fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.n_per_out = 1;
fep->ptp_caps.n_pins = 0;
fep->ptp_caps.pps = 1;
- fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjfine = fec_ptp_adjfine;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
fep->ptp_caps.gettime64 = fec_ptp_gettime;
fep->ptp_caps.settime64 = fec_ptp_settime;
@@ -605,6 +764,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+ hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
+ fep->perout_timer.function = fec_ptp_pps_perout_handler;
+
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
irq = platform_get_irq_optional(pdev, irq_idx);
@@ -634,6 +796,7 @@ void fec_ptp_stop(struct platform_device *pdev)
struct fec_enet_private *fep = netdev_priv(ndev);
cancel_delayed_work_sync(&fep->time_keep);
+ hrtimer_cancel(&fep->perout_timer);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 48bf8088795d..a55542c1ad65 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -3,7 +3,8 @@ config FSL_FMAN
tristate "FMan support"
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
- select PHYLIB
+ select PHYLINK
+ select PCS_LYNX
select CRC32
default n
help
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 6617932fd3fd..d00bae15a901 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -17,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/of_mdio.h>
#include <linux/mii.h>
+#include <linux/netdevice.h>
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -29,9 +30,6 @@
#define TBICON_CLK_SELECT 0x0020 /* Clock select */
#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
-#define TBIANA_SGMII 0x4001
-#define TBIANA_1000X 0x01a0
-
/* Interrupt Mask Register (IMASK) */
#define DTSEC_IMASK_BREN 0x80000000
#define DTSEC_IMASK_RXCEN 0x40000000
@@ -92,9 +90,10 @@
#define DTSEC_ECNTRL_GMIIM 0x00000040
#define DTSEC_ECNTRL_TBIM 0x00000020
-#define DTSEC_ECNTRL_SGMIIM 0x00000002
#define DTSEC_ECNTRL_RPM 0x00000010
#define DTSEC_ECNTRL_R100M 0x00000008
+#define DTSEC_ECNTRL_RMM 0x00000004
+#define DTSEC_ECNTRL_SGMIIM 0x00000002
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
#define TCTRL_TTSE 0x00000040
@@ -318,7 +317,8 @@ struct fman_mac {
void *fm;
struct fman_rev_info fm_rev_info;
bool basex_if;
- struct phy_device *tbiphy;
+ struct mdio_device *tbidev;
+ struct phylink_pcs pcs;
};
static void set_dflts(struct dtsec_cfg *cfg)
@@ -356,56 +356,14 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
phy_interface_t iface, u16 iface_speed, u64 addr,
u32 exception_mask, u8 tbi_addr)
{
- bool is_rgmii, is_sgmii, is_qsgmii;
enet_addr_t eth_addr;
- u32 tmp;
+ u32 tmp = 0;
int i;
/* Soft reset */
iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
iowrite32be(0, &regs->maccfg1);
- /* dtsec_id2 */
- tmp = ioread32be(&regs->tsec_id2);
-
- /* check RGMII support */
- if (iface == PHY_INTERFACE_MODE_RGMII ||
- iface == PHY_INTERFACE_MODE_RGMII_ID ||
- iface == PHY_INTERFACE_MODE_RGMII_RXID ||
- iface == PHY_INTERFACE_MODE_RGMII_TXID ||
- iface == PHY_INTERFACE_MODE_RMII)
- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
- return -EINVAL;
-
- if (iface == PHY_INTERFACE_MODE_SGMII ||
- iface == PHY_INTERFACE_MODE_MII)
- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
- return -EINVAL;
-
- is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
- iface == PHY_INTERFACE_MODE_RGMII_ID ||
- iface == PHY_INTERFACE_MODE_RGMII_RXID ||
- iface == PHY_INTERFACE_MODE_RGMII_TXID;
- is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
- is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
-
- tmp = 0;
- if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
- tmp |= DTSEC_ECNTRL_GMIIM;
- if (is_sgmii)
- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
- if (is_qsgmii)
- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
- DTSEC_ECNTRL_QSGMIIM);
- if (is_rgmii)
- tmp |= DTSEC_ECNTRL_RPM;
- if (iface_speed == SPEED_100)
- tmp |= DTSEC_ECNTRL_R100M;
-
- iowrite32be(tmp, &regs->ecntrl);
-
- tmp = 0;
-
if (cfg->tx_pause_time)
tmp |= cfg->tx_pause_time;
if (cfg->tx_pause_time_extd)
@@ -446,17 +404,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
tmp = 0;
- if (iface_speed < SPEED_1000)
- tmp |= MACCFG2_NIBBLE_MODE;
- else if (iface_speed == SPEED_1000)
- tmp |= MACCFG2_BYTE_MODE;
-
tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
MACCFG2_PREAMBLE_LENGTH_MASK;
if (cfg->tx_pad_crc)
tmp |= MACCFG2_PAD_CRC_EN;
- /* Full Duplex */
- tmp |= MACCFG2_FULL_DUPLEX;
iowrite32be(tmp, &regs->maccfg2);
tmp = (((cfg->non_back_to_back_ipg1 <<
@@ -525,10 +476,6 @@ static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
static int check_init_parameters(struct fman_mac *dtsec)
{
- if (dtsec->max_speed >= SPEED_10000) {
- pr_err("1G MAC driver supports 1G or lower speeds\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -630,22 +577,10 @@ static int get_exception_flag(enum fman_mac_exceptions exception)
return bit_mask;
}
-static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
-{
- /* Checks if dTSEC driver parameters were initialized */
- if (!dtsec_drv_params)
- return true;
-
- return false;
-}
-
static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (is_init_done(dtsec->dtsec_drv_param))
- return 0;
-
return (u16)ioread32be(&regs->maxfrm);
}
@@ -682,6 +617,7 @@ static void dtsec_isr(void *handle)
dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
if (event & DTSEC_IMASK_XFUNEN) {
/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+ /* FIXME: This races with the rest of the driver! */
if (dtsec->fm_rev_info.major == 2) {
u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
/* a. Write 0x00E0_0C00 to DTSEC_ID
@@ -814,6 +750,43 @@ static void free_init_resources(struct fman_mac *dtsec)
dtsec->unicast_addr_hash = NULL;
}
+static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct fman_mac, pcs);
+}
+
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
+
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
+}
+
+static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
+
+ return phylink_mii_c22_pcs_config(dtsec->tbidev, mode, interface,
+ advertising);
+}
+
+static void dtsec_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
+
+ phylink_mii_c22_pcs_an_restart(dtsec->tbidev);
+}
+
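+/* phylink PCS callbacks backed by the dTSEC's internal TBI PCS, accessed as a
+ * clause-22 device on the MDIO bus.
+ */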
+static const struct phylink_pcs_ops dtsec_pcs_ops = {
+ .pcs_get_state = dtsec_pcs_get_state,
+ .pcs_config = dtsec_pcs_config,
+ .pcs_an_restart = dtsec_pcs_an_restart,
+};
+
static void graceful_start(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -854,36 +827,11 @@ static void graceful_stop(struct fman_mac *dtsec)
static int dtsec_enable(struct fman_mac *dtsec)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- /* Enable */
- tmp = ioread32be(&regs->maccfg1);
- tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
- iowrite32be(tmp, &regs->maccfg1);
-
- /* Graceful start - clear the graceful Rx/Tx stop bit */
- graceful_start(dtsec);
-
return 0;
}
static void dtsec_disable(struct fman_mac *dtsec)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- u32 tmp;
-
- WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
-
- /* Graceful stop - Assert the graceful Rx/Tx stop bit */
- graceful_stop(dtsec);
-
- tmp = ioread32be(&regs->maccfg1);
- tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
- iowrite32be(tmp, &regs->maccfg1);
}
static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
@@ -894,11 +842,6 @@ static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 ptv = 0;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- graceful_stop(dtsec);
-
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
@@ -919,8 +862,6 @@ static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
- graceful_start(dtsec);
-
return 0;
}
@@ -929,11 +870,6 @@ static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- graceful_stop(dtsec);
-
tmp = ioread32be(&regs->maccfg1);
if (en)
tmp |= MACCFG1_RX_FLOW;
@@ -941,17 +877,124 @@ static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
+ return 0;
+}
+
+static struct phylink_pcs *dtsec_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
+{
+ struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &dtsec->pcs;
+ default:
+ return NULL;
+ }
+}
+
+static void dtsec_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct dtsec_regs __iomem *regs = mac_dev->fman_mac->regs;
+ u32 tmp;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ tmp = DTSEC_ECNTRL_RMM;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM;
+ break;
+ default:
+ dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n",
+ phy_modes(state->interface));
+ return;
+ }
+
+ iowrite32be(tmp, &regs->ecntrl);
+}
+
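+/* Apply the resolved link parameters (pause, speed, duplex) and only then
+ * enable the MAC and clear the graceful stop.
+ */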
+static void dtsec_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *dtsec = mac_dev->fman_mac;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
+ u32 tmp;
+
+ dtsec_set_tx_pause_frames(dtsec, 0, pause_time, 0);
+ dtsec_accept_rx_pause_frames(dtsec, rx_pause);
+
+ tmp = ioread32be(&regs->ecntrl);
+ if (speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+ else
+ tmp &= ~DTSEC_ECNTRL_R100M;
+ iowrite32be(tmp, &regs->ecntrl);
+
+ tmp = ioread32be(&regs->maccfg2);
+ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE | MACCFG2_FULL_DUPLEX);
+ if (speed >= SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+ else
+ tmp |= MACCFG2_NIBBLE_MODE;
+
+ if (duplex == DUPLEX_FULL)
+ tmp |= MACCFG2_FULL_DUPLEX;
+
+ iowrite32be(tmp, &regs->maccfg2);
+
+ mac_dev->update_speed(mac_dev, speed);
+
+ /* Enable */
+ tmp = ioread32be(&regs->maccfg1);
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ /* Graceful start - clear the graceful Rx/Tx stop bit */
graceful_start(dtsec);
+}
- return 0;
+static void dtsec_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ /* Graceful stop - Assert the graceful Rx/Tx stop bit */
+ graceful_stop(dtsec);
+
+ tmp = ioread32be(&regs->maccfg1);
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ iowrite32be(tmp, &regs->maccfg1);
}
+static const struct phylink_mac_ops dtsec_mac_ops = {
+ .mac_select_pcs = dtsec_select_pcs,
+ .mac_config = dtsec_mac_config,
+ .mac_link_up = dtsec_link_up,
+ .mac_link_down = dtsec_link_down,
+};
+
static int dtsec_modify_mac_address(struct fman_mac *dtsec,
const enet_addr_t *enet_addr)
{
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
graceful_stop(dtsec);
/* Initialize MAC Station Address registers (1 & 2)
@@ -975,9 +1018,6 @@ static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
@@ -1037,9 +1077,6 @@ static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
u32 tmp;
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->rctrl);
if (enable)
tmp |= RCTRL_MPROM;
@@ -1056,9 +1093,6 @@ static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 rctrl, tctrl;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
rctrl = ioread32be(&regs->rctrl);
tctrl = ioread32be(&regs->tctrl);
@@ -1087,9 +1121,6 @@ static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
@@ -1153,9 +1184,6 @@ static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
/* Set unicast promiscuous */
tmp = ioread32be(&regs->rctrl);
if (new_val)
@@ -1177,90 +1205,12 @@ static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
return 0;
}
-static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
-{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- graceful_stop(dtsec);
-
- tmp = ioread32be(&regs->maccfg2);
-
- /* Full Duplex */
- tmp |= MACCFG2_FULL_DUPLEX;
-
- tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
- if (speed < SPEED_1000)
- tmp |= MACCFG2_NIBBLE_MODE;
- else if (speed == SPEED_1000)
- tmp |= MACCFG2_BYTE_MODE;
- iowrite32be(tmp, &regs->maccfg2);
-
- tmp = ioread32be(&regs->ecntrl);
- if (speed == SPEED_100)
- tmp |= DTSEC_ECNTRL_R100M;
- else
- tmp &= ~DTSEC_ECNTRL_R100M;
- iowrite32be(tmp, &regs->ecntrl);
-
- graceful_start(dtsec);
-
- return 0;
-}
-
-static int dtsec_restart_autoneg(struct fman_mac *dtsec)
-{
- u16 tmp_reg16;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
-
- tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
- tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
- BMCR_FULLDPLX | BMCR_SPEED1000);
-
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
-
- return 0;
-}
-
-static void adjust_link_dtsec(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- if (!phy_dev->link) {
- dtsec_restart_autoneg(fman_mac);
-
- return;
- }
-
- dtsec_adjust_link(fman_mac, phy_dev->speed);
- mac_dev->update_speed(mac_dev, phy_dev->speed);
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
static int dtsec_set_exception(struct fman_mac *dtsec,
enum fman_mac_exceptions exception, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 bit_mask = 0;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
bit_mask = get_exception_flag(exception);
if (bit_mask) {
@@ -1310,12 +1260,9 @@ static int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
- u16 max_frm_ln;
+ u16 max_frm_ln, tbicon;
int err;
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
if (DEFAULT_RESET_ON_INIT &&
(fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
pr_err("Can't reset MAC!\n");
@@ -1330,38 +1277,19 @@ static int dtsec_init(struct fman_mac *dtsec)
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
dtsec->max_speed, dtsec->addr, dtsec->exceptions,
- dtsec->tbiphy->mdio.addr);
+ dtsec->tbidev->addr);
if (err) {
free_init_resources(dtsec);
pr_err("DTSEC version doesn't support this i/f mode\n");
return err;
}
- if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
- u16 tmp_reg16;
-
- /* Configure the TBI PHY Control Register */
- tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
+ /* Configure the TBI PHY Control Register */
+ tbicon = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
+ mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
- tmp_reg16 = TBICON_CLK_SELECT;
- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
-
- tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
- BMCR_FULLDPLX | BMCR_SPEED1000);
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
-
- if (dtsec->basex_if)
- tmp_reg16 = TBIANA_1000X;
- else
- tmp_reg16 = TBIANA_SGMII;
- phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
-
- tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
- BMCR_FULLDPLX | BMCR_SPEED1000);
-
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
- }
+ tbicon = TBICON_CLK_SELECT;
+ mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
/* Max Frame Length */
max_frm_ln = (u16)ioread32be(&regs->maxfrm);
@@ -1406,6 +1334,8 @@ static int dtsec_free(struct fman_mac *dtsec)
kfree(dtsec->dtsec_drv_param);
dtsec->dtsec_drv_param = NULL;
+ if (!IS_ERR_OR_NULL(dtsec->tbidev))
+ put_device(&dtsec->tbidev->dev);
kfree(dtsec);
return 0;
@@ -1434,7 +1364,6 @@ static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
dtsec->regs = mac_dev->vaddr;
dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- dtsec->max_speed = params->max_speed;
dtsec->phy_if = mac_dev->phy_if;
dtsec->mac_id = params->mac_id;
dtsec->exceptions = (DTSEC_IMASK_BREN |
@@ -1457,7 +1386,6 @@ static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
- dtsec->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
@@ -1476,18 +1404,18 @@ int dtsec_initialization(struct mac_device *mac_dev,
int err;
struct fman_mac *dtsec;
struct device_node *phy_node;
+ unsigned long capabilities;
+ unsigned long *supported;
+ mac_dev->phylink_ops = &dtsec_mac_ops;
mac_dev->set_promisc = dtsec_set_promiscuous;
mac_dev->change_addr = dtsec_modify_mac_address;
mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
mac_dev->set_exception = dtsec_set_exception;
mac_dev->set_allmulti = dtsec_set_allmulti;
mac_dev->set_tstamp = dtsec_set_tstamp;
mac_dev->set_multi = fman_set_multi;
- mac_dev->adjust_link = adjust_link_dtsec;
mac_dev->enable = dtsec_enable;
mac_dev->disable = dtsec_disable;
@@ -1502,19 +1430,56 @@ int dtsec_initialization(struct mac_device *mac_dev,
dtsec->dtsec_drv_param->tx_pad_crc = true;
phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
- if (!phy_node) {
- pr_err("TBI PHY node is not available\n");
+ if (!phy_node || !of_device_is_available(phy_node)) {
+ of_node_put(phy_node);
err = -EINVAL;
+ dev_err_probe(mac_dev->dev, err,
+ "TBI PCS node is not available\n");
goto _return_fm_mac_free;
}
- dtsec->tbiphy = of_phy_find_device(phy_node);
- if (!dtsec->tbiphy) {
- pr_err("of_phy_find_device (TBI PHY) failed\n");
- err = -EINVAL;
+ dtsec->tbidev = of_mdio_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!dtsec->tbidev) {
+ err = -EPROBE_DEFER;
+ dev_err_probe(mac_dev->dev, err,
+ "could not find mdiodev for PCS\n");
goto _return_fm_mac_free;
}
- put_device(&dtsec->tbiphy->mdio.dev);
+ dtsec->pcs.ops = &dtsec_pcs_ops;
+ dtsec->pcs.poll = true;
+
+ supported = mac_dev->phylink_config.supported_interfaces;
+
+ /* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are
+ * supported? If not, we can determine support via the phy if SerDes
+ * support is added.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII ||
+ mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ } else if (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ }
+
+ if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) {
+ phy_interface_set_rgmii(supported);
+
+ /* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports
+ * RMII and RGMII. However, the only SoCs which support RMII
+ * are the P1017 and P1023. Avoid advertising this mode on
+ * other SoCs. This is a bit of a moot point, since there's no
+ * in-tree support for ethernet on these platforms...
+ */
+ if (of_machine_is_compatible("fsl,P1023") ||
+ of_machine_is_compatible("fsl,P1023RDB"))
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ }
+
+ capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ capabilities |= MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+ mac_dev->phylink_config.mac_capabilities = capabilities;
err = dtsec_init(dtsec);
if (err < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 65887a3160d7..e5d6cddea731 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -170,20 +170,10 @@ struct fman_mac_params {
* 0 - FM_MAX_NUM_OF_10G_MACS
*/
u8 mac_id;
- /* Note that the speed should indicate the maximum rate that
- * this MAC should support rather than the actual speed;
- */
- u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
- /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
- * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
- * to interface between MAC and phy/backplane, SGMII phy can still
- * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
- */
- bool basex_if;
};
struct eth_hash_t {
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 32d26cf17843..9349f841bd06 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -11,42 +11,12 @@
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/pcs-lynx.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <linux/phy/phy.h>
#include <linux/of_mdio.h>
-/* PCS registers */
-#define MDIO_SGMII_CR 0x00
-#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
-#define MDIO_SGMII_LINK_TMR_L 0x12
-#define MDIO_SGMII_LINK_TMR_H 0x13
-#define MDIO_SGMII_IF_MODE 0x14
-
-/* SGMII Control defines */
-#define SGMII_CR_AN_EN 0x1000
-#define SGMII_CR_RESTART_AN 0x0200
-#define SGMII_CR_FD 0x0100
-#define SGMII_CR_SPEED_SEL1_1G 0x0040
-#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
- SGMII_CR_SPEED_SEL1_1G)
-
-/* SGMII Device Ability for SGMII defines */
-#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
-#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
-
-/* Link timer define */
-#define LINK_TMR_L 0xa120
-#define LINK_TMR_H 0x0007
-#define LINK_TMR_L_BASEX 0xaf08
-#define LINK_TMR_H_BASEX 0x002f
-
-/* SGMII IF Mode defines */
-#define IF_MODE_USE_SGMII_AN 0x0002
-#define IF_MODE_SGMII_EN 0x0001
-#define IF_MODE_SGMII_SPEED_100M 0x0004
-#define IF_MODE_SGMII_SPEED_1G 0x0008
-#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
-
/* Num of additional exact match MAC adr regs */
#define MEMAC_NUM_OF_PADDRS 7
@@ -308,9 +278,6 @@ struct fman_mac {
struct memac_regs __iomem *regs;
/* MAC address of device */
u64 addr;
- /* Ethernet physical interface */
- phy_interface_t phy_if;
- u16 max_speed;
struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
@@ -323,9 +290,12 @@ struct fman_mac {
struct memac_cfg *memac_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
- bool basex_if;
- struct phy_device *pcsphy;
+ struct phy *serdes;
+ struct phylink_pcs *sgmii_pcs;
+ struct phylink_pcs *qsgmii_pcs;
+ struct phylink_pcs *xfi_pcs;
bool allmulti_enabled;
+ bool rgmii_no_half_duplex;
};
static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
@@ -383,7 +353,6 @@ static void set_exception(struct memac_regs __iomem *regs, u32 val,
}
static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
- phy_interface_t phy_if, u16 speed, bool slow_10g_if,
u32 exceptions)
{
u32 tmp;
@@ -411,41 +380,6 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
iowrite32be((u32)0, &regs->pause_thresh[0]);
- /* IF_MODE */
- tmp = 0;
- switch (phy_if) {
- case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_10G;
- break;
- case PHY_INTERFACE_MODE_MII:
- tmp |= IF_MODE_MII;
- break;
- default:
- tmp |= IF_MODE_GMII;
- if (phy_if == PHY_INTERFACE_MODE_RGMII ||
- phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
- phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
- phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
- tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
- }
- iowrite32be(tmp, &regs->if_mode);
-
- /* TX_FIFO_SECTIONS */
- tmp = 0;
- if (phy_if == PHY_INTERFACE_MODE_XGMII) {
- if (slow_10g_if) {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
- } else {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
- }
- } else {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
- }
- iowrite32be(tmp, &regs->tx_fifo_sections);
-
/* clear all pending events and set-up interrupts */
iowrite32be(0xffffffff, &regs->ievent);
set_exception(regs, exceptions, true);
@@ -485,93 +419,6 @@ static u32 get_mac_addr_hash_code(u64 eth_addr)
return xor_val;
}
-static void setup_sgmii_internal_phy(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
-{
- u16 tmp_reg16;
-
- if (WARN_ON(!memac->pcsphy))
- return;
-
- /* SGMII mode */
- tmp_reg16 = IF_MODE_SGMII_EN;
- if (!fixed_link)
- /* AN enable */
- tmp_reg16 |= IF_MODE_USE_SGMII_AN;
- else {
- switch (fixed_link->speed) {
- case 10:
- /* For 10M: IF_MODE[SPEED_10M] = 0 */
- break;
- case 100:
- tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
- break;
- case 1000:
- default:
- tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
- break;
- }
- if (!fixed_link->duplex)
- tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
- }
- phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
-
- /* Device ability according to SGMII specification */
- tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
-
- /* Adjust link timer for SGMII -
- * According to Cisco SGMII specification the timer should be 1.6 ms.
- * The link_timer register is configured in units of the clock.
- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
- * unit = 1 / (125*10^6 Hz) = 8 ns.
- * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
- * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
- * we always set up here a value of 2.5 SGMII.
- */
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
-
- if (!fixed_link)
- /* Restart AN */
- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
- else
- /* AN disabled */
- tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
- phy_write(memac->pcsphy, 0x0, tmp_reg16);
-}
-
-static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
-{
- u16 tmp_reg16;
-
- /* AN Device capability */
- tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
-
- /* Adjust link timer for SGMII -
- * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
- * The link_timer register is configured in units of the clock.
- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
- * unit = 1 / (125*10^6 Hz) = 8 ns.
- * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
- * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
- * we always set up here a value of 2.5 SGMII.
- */
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
-
- /* Restart AN */
- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
- phy_write(memac->pcsphy, 0x0, tmp_reg16);
-}
-
static int check_init_parameters(struct fman_mac *memac)
{
if (!memac->exception_cb) {
@@ -677,41 +524,31 @@ static void free_init_resources(struct fman_mac *memac)
memac->unicast_addr_hash = NULL;
}
-static bool is_init_done(struct memac_cfg *memac_drv_params)
-{
- /* Checks if mEMAC driver parameters were initialized */
- if (!memac_drv_params)
- return true;
-
- return false;
-}
-
static int memac_enable(struct fman_mac *memac)
{
- struct memac_regs __iomem *regs = memac->regs;
- u32 tmp;
+ int ret;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ ret = phy_init(memac->serdes);
+ if (ret) {
+ dev_err(memac->dev_id->dev,
+ "could not initialize serdes: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
- tmp = ioread32be(&regs->command_config);
- tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
- iowrite32be(tmp, &regs->command_config);
+ ret = phy_power_on(memac->serdes);
+ if (ret) {
+ dev_err(memac->dev_id->dev,
+ "could not power on serdes: %pe\n", ERR_PTR(ret));
+ phy_exit(memac->serdes);
+ }
- return 0;
+ return ret;
}
static void memac_disable(struct fman_mac *memac)
-
{
- struct memac_regs __iomem *regs = memac->regs;
- u32 tmp;
-
- WARN_ON_ONCE(!is_init_done(memac->memac_drv_param));
-
- tmp = ioread32be(&regs->command_config);
- tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
- iowrite32be(tmp, &regs->command_config);
+ phy_power_off(memac->serdes);
+ phy_exit(memac->serdes);
}
static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
@@ -719,9 +556,6 @@ static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
if (new_val)
tmp |= CMD_CFG_PROMIS_EN;
@@ -733,73 +567,12 @@ static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
return 0;
}
-static int memac_adjust_link(struct fman_mac *memac, u16 speed)
-{
- struct memac_regs __iomem *regs = memac->regs;
- u32 tmp;
-
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- tmp = ioread32be(&regs->if_mode);
-
- /* Set full duplex */
- tmp &= ~IF_MODE_HD;
-
- if (phy_interface_mode_is_rgmii(memac->phy_if)) {
- /* Configure RGMII in manual mode */
- tmp &= ~IF_MODE_RGMII_AUTO;
- tmp &= ~IF_MODE_RGMII_SP_MASK;
- /* Full duplex */
- tmp |= IF_MODE_RGMII_FD;
-
- switch (speed) {
- case SPEED_1000:
- tmp |= IF_MODE_RGMII_1000;
- break;
- case SPEED_100:
- tmp |= IF_MODE_RGMII_100;
- break;
- case SPEED_10:
- tmp |= IF_MODE_RGMII_10;
- break;
- default:
- break;
- }
- }
-
- iowrite32be(tmp, &regs->if_mode);
-
- return 0;
-}
-
-static void adjust_link_memac(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- memac_adjust_link(fman_mac, phy_dev->speed);
- mac_dev->update_speed(mac_dev, phy_dev->speed);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
u16 pause_time, u16 thresh_time)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->tx_fifo_sections);
GET_TX_EMPTY_DEFAULT_VALUE(tmp);
@@ -834,9 +607,6 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
if (en)
tmp &= ~CMD_CFG_PAUSE_IGNORE;
@@ -848,12 +618,175 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
+static void memac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+ unsigned long caps = config->mac_capabilities;
+
+ if (phy_interface_mode_is_rgmii(state->interface) &&
+ memac->rgmii_no_half_duplex)
+ caps &= ~(MAC_10HD | MAC_100HD);
+
+ phylink_validate_mask_caps(supported, state, caps);
+}
+
+/**
+ * memac_if_mode() - Convert an interface mode into an IF_MODE config
+ * @interface: A phy interface mode
+ *
+ * Return: A configuration word, suitable for programming into the lower bits
+ * of %IF_MODE.
+ */
+static u32 memac_if_mode(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return IF_MODE_MII;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return IF_MODE_GMII | IF_MODE_RGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return IF_MODE_GMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return IF_MODE_10G;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
+{
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return memac->sgmii_pcs;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return memac->qsgmii_pcs;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return memac->xfi_pcs;
+ default:
+ return NULL;
+ }
+}
+
+static int memac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
+ iface);
+ default:
+ return 0;
+ }
+}
+
+static void memac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct memac_regs __iomem *regs = mac_dev->fman_mac->regs;
+ u32 tmp = ioread32be(&regs->if_mode);
+
+ tmp &= ~(IF_MODE_MASK | IF_MODE_RGMII);
+ tmp |= memac_if_mode(state->interface);
+ if (phylink_autoneg_inband(mode))
+ tmp |= IF_MODE_RGMII_AUTO;
+ iowrite32be(tmp, &regs->if_mode);
+}
+
+static void memac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *memac = mac_dev->fman_mac;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp = memac_if_mode(interface);
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
+
+ memac_set_tx_pause_frames(memac, 0, pause_time, 0);
+ memac_accept_rx_pause_frames(memac, rx_pause);
+
+ if (duplex == DUPLEX_HALF)
+ tmp |= IF_MODE_HD;
+
+ switch (speed) {
+ case SPEED_1000:
+ tmp |= IF_MODE_RGMII_1000;
+ break;
+ case SPEED_100:
+ tmp |= IF_MODE_RGMII_100;
+ break;
+ case SPEED_10:
+ tmp |= IF_MODE_RGMII_10;
+ break;
+ }
+ iowrite32be(tmp, &regs->if_mode);
+
+ /* TODO: EEE? */
+
+ if (speed == SPEED_10000) {
+ if (memac->fm_rev_info.major == 6 &&
+ memac->fm_rev_info.minor == 4)
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G;
+ else
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_10G;
+ tmp |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G;
+ } else {
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_1G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G;
+ }
+ iowrite32be(tmp, &regs->tx_fifo_sections);
+
+ mac_dev->update_speed(mac_dev, speed);
+
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+}
+
+static void memac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ /* TODO: graceful */
+ tmp = ioread32be(&regs->command_config);
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
+ iowrite32be(tmp, &regs->command_config);
+}
+
+static const struct phylink_mac_ops memac_mac_ops = {
+ .validate = memac_validate,
+ .mac_select_pcs = memac_select_pcs,
+ .mac_prepare = memac_prepare,
+ .mac_config = memac_mac_config,
+ .mac_link_up = memac_link_up,
+ .mac_link_down = memac_link_down,
+};
+
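A minimal sketch of how a consumer hands this ops table to phylink. Only the phylink, phylink_config, phylink_ops and phy_if members come from the mac_device changes later in this patch; net_dev, mac_node and the surrounding call site are illustrative, not the actual dpaa_eth wiring:

	struct phylink *pl;

	mac_dev->phylink_config.dev = &net_dev->dev;
	mac_dev->phylink_config.type = PHYLINK_NETDEV;
	pl = phylink_create(&mac_dev->phylink_config,
			    of_fwnode_handle(mac_node),
			    mac_dev->phy_if, mac_dev->phylink_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);
	mac_dev->phylink = pl;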
static int memac_modify_mac_address(struct fman_mac *memac,
const enet_addr_t *enet_addr)
{
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
return 0;
@@ -867,9 +800,6 @@ static int memac_add_hash_mac_address(struct fman_mac *memac,
u32 hash;
u64 addr;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
if (!(addr & GROUP_ADDRESS)) {
@@ -898,9 +828,6 @@ static int memac_set_allmulti(struct fman_mac *memac, bool enable)
u32 entry;
struct memac_regs __iomem *regs = memac->regs;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
if (enable) {
for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
iowrite32be(entry | HASH_CTRL_MCAST_EN,
@@ -930,9 +857,6 @@ static int memac_del_hash_mac_address(struct fman_mac *memac,
u32 hash;
u64 addr;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
@@ -960,9 +884,6 @@ static int memac_set_exception(struct fman_mac *memac,
{
u32 bit_mask = 0;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -981,25 +902,16 @@ static int memac_set_exception(struct fman_mac *memac,
static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
- u8 i;
enet_addr_t eth_addr;
- bool slow_10g_if = false;
- struct fixed_phy_status *fixed_link = NULL;
int err;
u32 reg32 = 0;
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
err = check_init_parameters(memac);
if (err)
return err;
memac_drv_param = memac->memac_drv_param;
- if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
- slow_10g_if = true;
-
/* First, reset the MAC if desired. */
if (memac_drv_param->reset_on_init) {
err = reset(memac->regs);
@@ -1015,10 +927,7 @@ static int memac_init(struct fman_mac *memac)
add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
}
- fixed_link = memac_drv_param->fixed_link;
-
- init(memac->regs, memac->memac_drv_param, memac->phy_if,
- memac->max_speed, slow_10g_if, memac->exceptions);
+ init(memac->regs, memac->memac_drv_param, memac->exceptions);
/* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
* Exists only in FMan 6.0 and 6.3.
@@ -1034,33 +943,6 @@ static int memac_init(struct fman_mac *memac)
iowrite32be(reg32, &memac->regs->command_config);
}
- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
- /* Configure internal SGMII PHY */
- if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac);
- else
- setup_sgmii_internal_phy(memac, fixed_link);
- } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- /* Configure 4 internal SGMII PHYs */
- for (i = 0; i < 4; i++) {
- u8 qsmgii_phy_addr, phy_addr;
- /* QSGMII PHY address occupies 3 upper bits of 5-bit
- * phy_address; the lower 2 bits are used to extend
- * register address space and access each one of 4
- * ports inside QSGMII.
- */
- phy_addr = memac->pcsphy->mdio.addr;
- qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
- memac->pcsphy->mdio.addr = qsmgii_phy_addr;
- if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac);
- else
- setup_sgmii_internal_phy(memac, fixed_link);
-
- memac->pcsphy->mdio.addr = phy_addr;
- }
- }
-
/* Max Frame Length */
err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
memac_drv_param->max_frame_length);
@@ -1089,19 +971,28 @@ static int memac_init(struct fman_mac *memac)
fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
- kfree(memac_drv_param);
- memac->memac_drv_param = NULL;
-
return 0;
}
+static void pcs_put(struct phylink_pcs *pcs)
+{
+ struct mdio_device *mdiodev;
+
+ if (IS_ERR_OR_NULL(pcs))
+ return;
+
+ mdiodev = lynx_get_mdio_device(pcs);
+ lynx_pcs_destroy(pcs);
+ mdio_device_free(mdiodev);
+}
+
static int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
- if (memac->pcsphy)
- put_device(&memac->pcsphy->mdio.dev);
-
+ pcs_put(memac->sgmii_pcs);
+ pcs_put(memac->qsgmii_pcs);
+ pcs_put(memac->xfi_pcs);
kfree(memac->memac_drv_param);
kfree(memac);
@@ -1134,8 +1025,6 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev,
memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
memac->regs = mac_dev->vaddr;
- memac->max_speed = params->max_speed;
- memac->phy_if = mac_dev->phy_if;
memac->mac_id = params->mac_id;
memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
@@ -1143,7 +1032,6 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev,
memac->event_cb = params->event_cb;
memac->dev_id = mac_dev;
memac->fm = params->fm;
- memac->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
@@ -1151,101 +1039,221 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev,
return memac;
}
+static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
+ int index)
+{
+ struct device_node *node;
+ struct mdio_device *mdiodev = NULL;
+ struct phylink_pcs *pcs;
+
+ node = of_parse_phandle(mac_node, "pcsphy-handle", index);
+ if (node && of_device_is_available(node))
+ mdiodev = of_mdio_find_device(node);
+ of_node_put(node);
+
+ if (!mdiodev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ pcs = lynx_pcs_create(mdiodev);
+ return pcs;
+}
+
+static bool memac_supports(struct mac_device *mac_dev, phy_interface_t iface)
+{
+ /* If there's no serdes device, assume that it's been configured for
+ * whatever the default interface mode is.
+ */
+ if (!mac_dev->fman_mac->serdes)
+ return mac_dev->phy_if == iface;
+ /* Otherwise, ask the serdes */
+ return !phy_validate(mac_dev->fman_mac->serdes, PHY_MODE_ETHERNET,
+ iface, NULL);
+}
+
int memac_initialization(struct mac_device *mac_dev,
struct device_node *mac_node,
struct fman_mac_params *params)
{
int err;
- struct device_node *phy_node;
- struct fixed_phy_status *fixed_link;
+ struct device_node *fixed;
+ struct phylink_pcs *pcs;
struct fman_mac *memac;
+ unsigned long capabilities;
+ unsigned long *supported;
+ mac_dev->phylink_ops = &memac_mac_ops;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
mac_dev->set_exception = memac_set_exception;
mac_dev->set_allmulti = memac_set_allmulti;
mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->set_multi = fman_set_multi;
- mac_dev->adjust_link = adjust_link_memac;
mac_dev->enable = memac_enable;
mac_dev->disable = memac_disable;
- if (params->max_speed == SPEED_10000)
- mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII;
-
mac_dev->fman_mac = memac_config(mac_dev, params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
+ if (!mac_dev->fman_mac)
+ return -EINVAL;
memac = mac_dev->fman_mac;
memac->memac_drv_param->max_frame_length = fman_get_max_frm();
memac->memac_drv_param->reset_on_init = true;
- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
- memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
- if (!phy_node) {
- pr_err("PCS PHY node is not available\n");
- err = -EINVAL;
+
+ err = of_property_match_string(mac_node, "pcs-handle-names", "xfi");
+ if (err >= 0) {
+ memac->xfi_pcs = memac_pcs_create(mac_node, err);
+ if (IS_ERR(memac->xfi_pcs)) {
+ err = PTR_ERR(memac->xfi_pcs);
+ dev_err_probe(mac_dev->dev, err, "missing xfi pcs\n");
goto _return_fm_mac_free;
}
+ } else if (err != -EINVAL && err != -ENODATA) {
+ goto _return_fm_mac_free;
+ }
- memac->pcsphy = of_phy_find_device(phy_node);
- if (!memac->pcsphy) {
- pr_err("of_phy_find_device (PCS PHY) failed\n");
- err = -EINVAL;
+ err = of_property_match_string(mac_node, "pcs-handle-names", "qsgmii");
+ if (err >= 0) {
+ memac->qsgmii_pcs = memac_pcs_create(mac_node, err);
+ if (IS_ERR(memac->qsgmii_pcs)) {
+ err = PTR_ERR(memac->qsgmii_pcs);
+ dev_err_probe(mac_dev->dev, err,
+ "missing qsgmii pcs\n");
goto _return_fm_mac_free;
}
+ } else if (err != -EINVAL && err != -ENODATA) {
+ goto _return_fm_mac_free;
}
- if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
- struct phy_device *phy;
+ /* For compatibility, if pcs-handle-names is missing, we assume this
+ * phy is the first one in pcsphy-handle
+ */
+ err = of_property_match_string(mac_node, "pcs-handle-names", "sgmii");
+ if (err == -EINVAL || err == -ENODATA)
+ pcs = memac_pcs_create(mac_node, 0);
+ else if (err < 0)
+ goto _return_fm_mac_free;
+ else
+ pcs = memac_pcs_create(mac_node, err);
- err = of_phy_register_fixed_link(mac_node);
- if (err)
- goto _return_fm_mac_free;
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err_probe(mac_dev->dev, err, "missing pcs\n");
+ goto _return_fm_mac_free;
+ }
- fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL);
- if (!fixed_link) {
- err = -ENOMEM;
- goto _return_fm_mac_free;
- }
+ /* If err is set here, it means that pcs-handle-names was missing above
+ * (and therefore that xfi_pcs cannot be set). If we are defaulting to
+ * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
+ */
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ memac->xfi_pcs = pcs;
+ else
+ memac->sgmii_pcs = pcs;
+
+ memac->serdes = devm_of_phy_get(mac_dev->dev, mac_node, "serdes");
+ err = PTR_ERR(memac->serdes);
+ if (err == -ENODEV || err == -ENOSYS) {
+ dev_dbg(mac_dev->dev, "could not get (optional) serdes\n");
+ memac->serdes = NULL;
+ } else if (IS_ERR(memac->serdes)) {
+ dev_err_probe(mac_dev->dev, err, "could not get serdes\n");
+ goto _return_fm_mac_free;
+ }
- mac_dev->phy_node = of_node_get(mac_node);
- phy = of_phy_find_device(mac_dev->phy_node);
- if (!phy) {
- err = -EINVAL;
- of_node_put(mac_dev->phy_node);
- goto _return_fixed_link_free;
- }
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
+ /* TODO: The following interface modes are supported by (some) hardware
+ * but not by this driver:
+ * - 1000BASE-KX
+ * - 10GBASE-KR
+ * - XAUI/HiGig
+ */
+ supported = mac_dev->phylink_config.supported_interfaces;
- fixed_link->link = phy->link;
- fixed_link->speed = phy->speed;
- fixed_link->duplex = phy->duplex;
- fixed_link->pause = phy->pause;
- fixed_link->asym_pause = phy->asym_pause;
+ /* Note that half duplex is only supported on 10/100M interfaces. */
- put_device(&phy->mdio.dev);
- memac->memac_drv_param->fixed_link = fixed_link;
+ if (memac->sgmii_pcs &&
+ (memac_supports(mac_dev, PHY_INTERFACE_MODE_SGMII) ||
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_1000BASEX))) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
}
+ if (memac->sgmii_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_2500BASEX))
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ if (memac->qsgmii_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_QSGMII))
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, supported);
+ else if (mac_dev->phy_if == PHY_INTERFACE_MODE_QSGMII)
+ dev_warn(mac_dev->dev, "no QSGMII pcs specified\n");
+
+ if (memac->xfi_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_10GBASER)) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ } else {
+ /* From what I can tell, no 10G MACs support RGMII. */
+ phy_interface_set_rgmii(supported);
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ }
+
+ capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100;
+ capabilities |= MAC_1000FD | MAC_2500FD | MAC_10000FD;
+
+ /* These SoCs don't support half duplex at all; there's no distinct
+ * FMan version or compatible string for them, so we just have to
+ * check the machine compatible instead.
+ */
+ if (of_machine_is_compatible("fsl,ls1043a") ||
+ of_machine_is_compatible("fsl,ls1046a") ||
+ of_machine_is_compatible("fsl,B4QDS"))
+ capabilities &= ~(MAC_10HD | MAC_100HD);
+
+ mac_dev->phylink_config.mac_capabilities = capabilities;
+
+ /* The T2080 and T4240 don't support half duplex RGMII. There is no
+ * other way to identify these SoCs, so just use the machine
+ * compatible.
+ */
+ if (of_machine_is_compatible("fsl,T2080QDS") ||
+ of_machine_is_compatible("fsl,T2080RDB") ||
+ of_machine_is_compatible("fsl,T2081QDS") ||
+ of_machine_is_compatible("fsl,T4240QDS") ||
+ of_machine_is_compatible("fsl,T4240RDB"))
+ memac->rgmii_no_half_duplex = true;
+
+ /* Most boards should use MLO_AN_INBAND, but existing boards don't have
+ * a managed property. Default to MLO_AN_INBAND if nothing else is
+ * specified. We need to be careful and not enable this if we have a
+ * fixed link or if we are using MII or RGMII, since those
+ * configuration modes don't use in-band autonegotiation.
+ */
+ fixed = of_get_child_by_name(mac_node, "fixed-link");
+ if (!fixed && !of_property_read_bool(mac_node, "fixed-link") &&
+ !of_property_read_bool(mac_node, "managed") &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
+ !phy_interface_mode_is_rgmii(mac_dev->phy_if))
+ mac_dev->phylink_config.ovr_an_inband = true;
+ of_node_put(fixed);
+
err = memac_init(mac_dev->fman_mac);
if (err < 0)
- goto _return_fixed_link_free;
+ goto _return_fm_mac_free;
dev_info(mac_dev->dev, "FMan MEMAC\n");
- goto _return;
+ return 0;
-_return_fixed_link_free:
- kfree(fixed_link);
_return_fm_mac_free:
memac_free(mac_dev->fman_mac);
-_return:
return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 5a4be54ad459..c2261d26db5b 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -13,6 +13,7 @@
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/crc32.h>
+#include <linux/netdevice.h>
/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
@@ -243,10 +244,6 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
static int check_init_parameters(struct fman_mac *tgec)
{
- if (tgec->max_speed < SPEED_10000) {
- pr_err("10G MAC driver only support 10G speed\n");
- return -EINVAL;
- }
if (!tgec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -384,40 +381,13 @@ static void free_init_resources(struct fman_mac *tgec)
tgec->unicast_addr_hash = NULL;
}
-static bool is_init_done(struct tgec_cfg *cfg)
-{
- /* Checks if tGEC driver parameters were initialized */
- if (!cfg)
- return true;
-
- return false;
-}
-
static int tgec_enable(struct fman_mac *tgec)
{
- struct tgec_regs __iomem *regs = tgec->regs;
- u32 tmp;
-
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
- tmp = ioread32be(&regs->command_config);
- tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
- iowrite32be(tmp, &regs->command_config);
-
return 0;
}
static void tgec_disable(struct fman_mac *tgec)
{
- struct tgec_regs __iomem *regs = tgec->regs;
- u32 tmp;
-
- WARN_ON_ONCE(!is_init_done(tgec->cfg));
-
- tmp = ioread32be(&regs->command_config);
- tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
- iowrite32be(tmp, &regs->command_config);
}
static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
@@ -425,9 +395,6 @@ static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
if (new_val)
tmp |= CMD_CFG_PROMIS_EN;
@@ -444,9 +411,6 @@ static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
{
struct tgec_regs __iomem *regs = tgec->regs;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
iowrite32be((u32)pause_time, &regs->pause_quant);
return 0;
@@ -457,9 +421,6 @@ static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
if (!en)
tmp |= CMD_CFG_PAUSE_IGNORE;
@@ -470,12 +431,52 @@ static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
+static void tgec_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void tgec_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *tgec = mac_dev->fman_mac;
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
+ u32 tmp;
+
+ tgec_set_tx_pause_frames(tgec, 0, pause_time, 0);
+ tgec_accept_rx_pause_frames(tgec, rx_pause);
+ mac_dev->update_speed(mac_dev, speed);
+
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+}
+
+static void tgec_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct fman_mac *tgec = fman_config_to_mac(config)->fman_mac;
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ tmp = ioread32be(&regs->command_config);
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
+ iowrite32be(tmp, &regs->command_config);
+}
+
+static const struct phylink_mac_ops tgec_mac_ops = {
+ .mac_config = tgec_mac_config,
+ .mac_link_up = tgec_link_up,
+ .mac_link_down = tgec_link_down,
+};
+
static int tgec_modify_mac_address(struct fman_mac *tgec,
const enet_addr_t *p_enet_addr)
{
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
@@ -490,9 +491,6 @@ static int tgec_add_hash_mac_address(struct fman_mac *tgec,
u32 crc = 0xFFFFFFFF, hash;
u64 addr;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
if (!(addr & GROUP_ADDRESS)) {
@@ -525,9 +523,6 @@ static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
u32 entry;
struct tgec_regs __iomem *regs = tgec->regs;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
if (enable) {
for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
iowrite32be(entry | TGEC_HASH_MCAST_EN,
@@ -548,9 +543,6 @@ static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
if (enable)
@@ -572,9 +564,6 @@ static int tgec_del_hash_mac_address(struct fman_mac *tgec,
u32 crc = 0xFFFFFFFF, hash;
u64 addr;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
addr = ((*(u64 *)eth_addr) >> 16);
/* CRC calculation */
@@ -601,22 +590,12 @@ static int tgec_del_hash_mac_address(struct fman_mac *tgec,
return 0;
}
-static void tgec_adjust_link(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
-
- mac_dev->update_speed(mac_dev, phy_dev->speed);
-}
-
static int tgec_set_exception(struct fman_mac *tgec,
enum fman_mac_exceptions exception, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 bit_mask = 0;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -641,9 +620,6 @@ static int tgec_init(struct fman_mac *tgec)
enet_addr_t eth_addr;
int err;
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
if (DEFAULT_RESET_ON_INIT &&
(fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
pr_err("Can't reset MAC!\n");
@@ -753,7 +729,6 @@ static struct fman_mac *tgec_config(struct mac_device *mac_dev,
tgec->regs = mac_dev->vaddr;
tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- tgec->max_speed = params->max_speed;
tgec->mac_id = params->mac_id;
tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
TGEC_IMASK_REM_FAULT |
@@ -788,17 +763,15 @@ int tgec_initialization(struct mac_device *mac_dev,
int err;
struct fman_mac *tgec;
+ mac_dev->phylink_ops = &tgec_mac_ops;
mac_dev->set_promisc = tgec_set_promiscuous;
mac_dev->change_addr = tgec_modify_mac_address;
mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
mac_dev->set_exception = tgec_set_exception;
mac_dev->set_allmulti = tgec_set_allmulti;
mac_dev->set_tstamp = tgec_set_tstamp;
mac_dev->set_multi = fman_set_multi;
- mac_dev->adjust_link = tgec_adjust_link;
mac_dev->enable = tgec_enable;
mac_dev->disable = tgec_disable;
@@ -808,6 +781,19 @@ int tgec_initialization(struct mac_device *mac_dev,
goto _return;
}
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * XAUI, so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XAUI;
+
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ mac_dev->phylink_config.supported_interfaces);
+ mac_dev->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10000FD;
+
tgec = mac_dev->fman_mac;
tgec->cfg->max_frame_length = fman_get_max_frm();
err = tgec_init(tgec);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 13e67f2864be..43665806c590 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -15,6 +15,7 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
@@ -93,130 +94,8 @@ int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
return 0;
}
-/**
- * fman_set_mac_active_pause
- * @mac_dev: A pointer to the MAC device
- * @rx: Pause frame setting for RX
- * @tx: Pause frame setting for TX
- *
- * Set the MAC RX/TX PAUSE frames settings
- *
- * Avoid redundant calls to FMD, if the MAC driver already contains the desired
- * active PAUSE settings. Otherwise, the new active settings should be reflected
- * in FMan.
- *
- * Return: 0 on success; Error code otherwise.
- */
-int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
-{
- struct fman_mac *fman_mac = mac_dev->fman_mac;
- int err = 0;
-
- if (rx != mac_dev->rx_pause_active) {
- err = mac_dev->set_rx_pause(fman_mac, rx);
- if (likely(err == 0))
- mac_dev->rx_pause_active = rx;
- }
-
- if (tx != mac_dev->tx_pause_active) {
- u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
- FSL_FM_PAUSE_TIME_DISABLE);
-
- err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
-
- if (likely(err == 0))
- mac_dev->tx_pause_active = tx;
- }
-
- return err;
-}
-EXPORT_SYMBOL(fman_set_mac_active_pause);
-
-/**
- * fman_get_pause_cfg
- * @mac_dev: A pointer to the MAC device
- * @rx_pause: Return value for RX setting
- * @tx_pause: Return value for TX setting
- *
- * Determine the MAC RX/TX PAUSE frames settings based on PHY
- * autonegotiation or values set by eththool.
- *
- * Return: Pointer to FMan device.
- */
-void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
- bool *tx_pause)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- u16 lcl_adv, rmt_adv;
- u8 flowctrl;
-
- *rx_pause = *tx_pause = false;
-
- if (!phy_dev->duplex)
- return;
-
- /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
- * are those set by ethtool.
- */
- if (!mac_dev->autoneg_pause) {
- *rx_pause = mac_dev->rx_pause_req;
- *tx_pause = mac_dev->tx_pause_req;
- return;
- }
-
- /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
- * settings depend on the result of the link negotiation.
- */
-
- /* get local capabilities */
- lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
-
- /* get link partner capabilities */
- rmt_adv = 0;
- if (phy_dev->pause)
- rmt_adv |= LPA_PAUSE_CAP;
- if (phy_dev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
-
- /* Calculate TX/RX settings based on local and peer advertised
- * symmetric/asymmetric PAUSE capabilities.
- */
- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
- if (flowctrl & FLOW_CTRL_RX)
- *rx_pause = true;
- if (flowctrl & FLOW_CTRL_TX)
- *tx_pause = true;
-}
-EXPORT_SYMBOL(fman_get_pause_cfg);
-
-#define DTSEC_SUPPORTED \
- (SUPPORTED_10baseT_Half \
- | SUPPORTED_10baseT_Full \
- | SUPPORTED_100baseT_Half \
- | SUPPORTED_100baseT_Full \
- | SUPPORTED_Autoneg \
- | SUPPORTED_Pause \
- | SUPPORTED_Asym_Pause \
- | SUPPORTED_FIBRE \
- | SUPPORTED_MII)
-
static DEFINE_MUTEX(eth_lock);
-static const u16 phy2speed[] = {
- [PHY_INTERFACE_MODE_MII] = SPEED_100,
- [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
- [PHY_INTERFACE_MODE_RMII] = SPEED_100,
- [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
- [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
-};
-
static struct platform_device *dpaa_eth_add_device(int fman_id,
struct mac_device *mac_dev)
{
@@ -263,8 +142,8 @@ no_mem:
}
static const struct of_device_id mac_match[] = {
- { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
- { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
{ .compatible = "fsl,fman-memac", .data = memac_initialization },
{}
};
@@ -295,6 +174,7 @@ static int mac_probe(struct platform_device *_of_dev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(_of_dev, mac_dev);
/* Save private information */
mac_dev->priv = priv;
@@ -424,57 +304,21 @@ static int mac_probe(struct platform_device *_of_dev)
}
mac_dev->phy_if = phy_if;
- priv->speed = phy2speed[mac_dev->phy_if];
- params.max_speed = priv->speed;
- mac_dev->if_support = DTSEC_SUPPORTED;
- /* We don't support half-duplex in SGMII mode */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
- mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
- SUPPORTED_100baseT_Half);
-
- /* Gigabit support (no half-duplex) */
- if (params.max_speed == 1000)
- mac_dev->if_support |= SUPPORTED_1000baseT_Full;
-
- /* The 10G interface only supports one mode */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->if_support = SUPPORTED_10000baseT_Full;
-
- /* Get the rest of the PHY information */
- mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
-
- params.basex_if = false;
params.mac_id = priv->cell_index;
params.fm = (void *)priv->fman;
params.exception_cb = mac_exception;
params.event_cb = mac_exception;
err = init(mac_dev, mac_node, &params);
- if (err < 0) {
- dev_err(dev, "mac_dev->init() = %d\n", err);
- of_node_put(mac_dev->phy_node);
- return err;
- }
-
- /* pause frame autonegotiation enabled */
- mac_dev->autoneg_pause = true;
-
- /* By intializing the values to false, force FMD to enable PAUSE frames
- * on RX and TX
- */
- mac_dev->rx_pause_req = true;
- mac_dev->tx_pause_req = true;
- mac_dev->rx_pause_active = false;
- mac_dev->tx_pause_active = false;
- err = fman_set_mac_active_pause(mac_dev, true, true);
if (err < 0)
- dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
+ return err;
if (!is_zero_ether_addr(mac_dev->addr))
dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
if (IS_ERR(priv->eth_dev)) {
+ err = PTR_ERR(priv->eth_dev);
dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
priv->cell_index);
priv->eth_dev = NULL;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 13b69ca5f00c..ad06f8d7924b 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/if_ether.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/list.h>
#include "fman_port.h"
@@ -24,32 +25,22 @@ struct mac_device {
struct resource *res;
u8 addr[ETH_ALEN];
struct fman_port *port[2];
- u32 if_support;
- struct phy_device *phy_dev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
phy_interface_t phy_if;
- struct device_node *phy_node;
- struct net_device *net_dev;
- bool autoneg_pause;
- bool rx_pause_req;
- bool tx_pause_req;
- bool rx_pause_active;
- bool tx_pause_active;
bool promisc;
bool allmulti;
+ const struct phylink_mac_ops *phylink_ops;
int (*enable)(struct fman_mac *mac_dev);
void (*disable)(struct fman_mac *mac_dev);
- void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
- int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
- int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
- u16 pause_time, u16 thresh_time);
int (*set_exception)(struct fman_mac *mac_dev,
enum fman_mac_exceptions exception, bool enable);
int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
@@ -63,6 +54,12 @@ struct mac_device {
struct mac_priv_s *priv;
};
+static inline struct mac_device
+*fman_config_to_mac(struct phylink_config *config)
+{
+ return container_of(config, struct mac_device, phylink_config);
+}
+
struct dpaa_eth_data {
struct mac_device *mac_dev;
int mac_hw_id;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
index d50c222948b4..4fbeb3fd71a8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
@@ -3,14 +3,7 @@
#include "funeth.h"
#include "funeth_devlink.h"
-static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
- struct netlink_ext_ack *extack)
-{
- return devlink_info_driver_name_put(req, KBUILD_MODNAME);
-}
-
static const struct devlink_ops fun_dl_ops = {
- .info_get = fun_dl_info_get,
};
struct devlink *fun_devlink_alloc(struct device *dev)
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index 095f51c4d9d9..b4cce30e526a 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1178,13 +1178,6 @@ static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static struct devlink_port *fun_get_devlink_port(struct net_device *netdev)
-{
- struct funeth_priv *fp = netdev_priv(netdev);
-
- return &fp->dl_port;
-}
-
static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
{
if (ed->num_vports)
@@ -1350,7 +1343,6 @@ static const struct net_device_ops fun_netdev_ops = {
.ndo_set_vf_vlan = fun_set_vf_vlan,
.ndo_set_vf_rate = fun_set_vf_rate,
.ndo_get_vf_config = fun_get_vf_config,
- .ndo_get_devlink_port = fun_get_devlink_port,
};
#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
@@ -1760,6 +1752,7 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
goto free_rss;
SET_NETDEV_DEV(netdev, fdev->dev);
+ SET_NETDEV_DEVLINK_PORT(netdev, &fp->dl_port);
netdev->netdev_ops = &fun_netdev_ops;
netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM;
@@ -1800,9 +1793,6 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
rc = register_netdev(netdev);
if (rc)
goto unreg_devlink;
-
- devlink_port_type_eth_set(&fp->dl_port, netdev);
-
return 0;
unreg_devlink:
@@ -1827,7 +1817,6 @@ static void fun_destroy_netdev(struct net_device *netdev)
struct funeth_priv *fp;
fp = netdev_priv(netdev);
- devlink_port_type_clear(&fp->dl_port);
unregister_netdev(netdev);
devlink_port_unregister(&fp->dl_port);
fun_ktls_cleanup(fp);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 671f51135c26..53b7e95213a8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -206,9 +206,9 @@ struct funeth_rxq {
#define FUN_QSTAT_READ(q, seq, stats_copy) \
do { \
- seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
+ seq = u64_stats_fetch_begin(&(q)->syncp); \
stats_copy = (q)->stats; \
- } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
+ } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
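FUN_QSTAT_READ wraps the standard u64_stats reader loop (now without the _irq suffix); open-coded, the same pattern looks like the sketch below, where ring and its packets counter are illustrative names rather than funeth structures:

	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		pkts = ring->packets;
	} while (u64_stats_fetch_retry(&ring->syncp, start));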
#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 160735484465..64eb0442c82f 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -60,7 +60,8 @@ struct gve_rx_slot_page_info {
void *page_address;
u32 page_offset; /* offset to write to in page */
int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
- u8 can_flip;
+ u16 pad; /* adjustment for rx padding */
+ u8 can_flip; /* tracks if the networking stack is using the page */
};
/* A list of pages registered with the device during setup and used by a queue
@@ -149,10 +150,17 @@ struct gve_rx_ctx {
/* head and tail of skb chain for the current packet or NULL if none */
struct sk_buff *skb_head;
struct sk_buff *skb_tail;
- u16 total_expected_size;
- u8 expected_frag_cnt;
- u8 curr_frag_cnt;
- u8 reuse_frags;
+ u32 total_size;
+ u8 frag_cnt;
+ bool drop_pkt;
+};
+
+struct gve_rx_cnts {
+ u32 ok_pkt_bytes;
+ u16 ok_pkt_cnt;
+ u16 total_pkt_cnt;
+ u16 cont_pkt_cnt;
+ u16 desc_err_pkt_cnt;
};
/* Contains datapath state used to represent an RX queue. */
@@ -167,6 +175,10 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
u16 packet_buffer_size;
+
+ u32 qpl_copy_pool_mask;
+ u32 qpl_copy_pool_head;
+ struct gve_rx_slot_page_info *qpl_copy_pool;
};
/* DQO fields. */
@@ -216,7 +228,9 @@ struct gve_rx_ring {
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
- u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
+ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
+ u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
+
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
@@ -549,6 +563,7 @@ struct gve_priv {
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
u32 adminq_get_ptype_map_cnt;
+ u32 adminq_verify_driver_compatibility_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index f7621ab672b9..60061288ad9d 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -289,7 +289,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
return -ENOMEM;
case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
default:
dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
return -EINVAL;
@@ -407,6 +407,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++;
break;
+ case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
+ priv->adminq_verify_driver_compatibility_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -878,6 +881,22 @@ int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
return gve_adminq_execute_cmd(priv, &cmd);
}
+int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
+ u64 driver_info_len,
+ dma_addr_t driver_info_addr)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
+ cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
+ .driver_info_len = cpu_to_be64(driver_info_len),
+ .driver_info_addr = cpu_to_be64(driver_info_addr),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
union gve_adminq_command gvnic_cmd;
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 83c0b40cd2d9..cf29662e6ad1 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -24,6 +24,7 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_REPORT_STATS = 0xC,
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
+ GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
};
/* Admin queue status codes */
@@ -146,6 +147,51 @@ enum gve_sup_feature_mask {
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
+#define GVE_VERSION_STR_LEN 128
+
+enum gve_driver_capbility {
+ gve_driver_capability_gqi_qpl = 0,
+ gve_driver_capability_gqi_rda = 1,
+ gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
+ gve_driver_capability_dqo_rda = 3,
+ gve_driver_capability_alt_miss_compl = 4,
+};
+
+#define GVE_CAP1(a) BIT((int)a)
+#define GVE_CAP2(a) BIT(((int)a) - 64)
+#define GVE_CAP3(a) BIT(((int)a) - 128)
+#define GVE_CAP4(a) BIT(((int)a) - 192)
+
+#define GVE_DRIVER_CAPABILITY_FLAGS1 \
+ (GVE_CAP1(gve_driver_capability_gqi_qpl) | \
+ GVE_CAP1(gve_driver_capability_gqi_rda) | \
+ GVE_CAP1(gve_driver_capability_dqo_rda) | \
+ GVE_CAP1(gve_driver_capability_alt_miss_compl))
+
+#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
+#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
+#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
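Each GVE_CAPn() macro picks the bit position within the corresponding 64-bit flags word, so capability indices 0-63 land in the first word, 64-127 in the second, and so on. A worked illustration (the index-65 capability is hypothetical, not defined by this patch):

/* With the enum above:
 *   GVE_DRIVER_CAPABILITY_FLAGS1 == BIT(0) | BIT(1) | BIT(3) | BIT(4) == 0x1b
 * A hypothetical capability with index 65 would instead use
 *   GVE_CAP2(65) == BIT(65 - 64) == BIT(1)
 * and be advertised in driver_capability_flags[1].
 */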
+
+struct gve_driver_info {
+ u8 os_type; /* 0x01 = Linux */
+ u8 driver_major;
+ u8 driver_minor;
+ u8 driver_sub;
+ __be32 os_version_major;
+ __be32 os_version_minor;
+ __be32 os_version_sub;
+ __be64 driver_capability_flags[4];
+ u8 os_version_str1[GVE_VERSION_STR_LEN];
+ u8 os_version_str2[GVE_VERSION_STR_LEN];
+};
+
+struct gve_adminq_verify_driver_compatibility {
+ __be64 driver_info_len;
+ __be64 driver_info_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);
+
struct gve_adminq_configure_device_resources {
__be64 counter_array;
__be64 irq_db_addr;
@@ -345,6 +391,8 @@ union gve_adminq_command {
struct gve_adminq_report_stats report_stats;
struct gve_adminq_report_link_speed report_link_speed;
struct gve_adminq_get_ptype_map get_ptype_map;
+ struct gve_adminq_verify_driver_compatibility
+ verify_driver_compatibility;
};
};
u8 reserved[64];
@@ -372,6 +420,9 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
+int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
+ u64 driver_info_len,
+ dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);
struct gve_ptype_lut;
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index e8fe9adef7f2..f79cd0591110 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -176,6 +176,11 @@ static_assert(sizeof(struct gve_tx_compl_desc) == 8);
#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */
+/* The most significant bit in the completion tag can change the completion
+ * type from packet completion to miss path completion.
+ */
+#define GVE_ALT_MISS_COMPL_BIT BIT(15)
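A sketch of how a TX completion handler can act on this bit. Here desc is assumed to be a completion descriptor with a 16-bit completion tag, and the two handler helpers are hypothetical names, not functions added by this patch:

	u16 tag = le16_to_cpu(desc->completion_tag);

	if (tag & GVE_ALT_MISS_COMPL_BIT) {
		/* High bit set: handle as a miss-path completion for the
		 * buffer identified by the remaining tag bits.
		 */
		handle_miss_completion(tx, tag & ~GVE_ALT_MISS_COMPL_BIT);
	} else {
		handle_packet_completion(tx, tag);
	}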
+
/* Descriptor to post buffers to HW on buffer queue. */
struct gve_rx_desc_dqo {
__le16 buf_id; /* ID returned in Rx completion descriptor */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 7b9a2d9d9624..ce574d097e28 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -45,6 +45,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
+ "rx_frag_alloc_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -177,14 +178,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_rx_ring *rx = &priv->rx[ring];
start =
- u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
rx_bytes += tmp_rx_bytes;
@@ -198,10 +199,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (priv->tx) {
do {
start =
- u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
tmp_tx_pkts = priv->tx[ring].pkt_done;
tmp_tx_bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
@@ -259,18 +260,19 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
- u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt;
+ data[i++] = rx->rx_frag_alloc_cnt;
/* rx dropped packets */
data[i++] = tmp_rx_skb_alloc_fail +
tmp_rx_buf_alloc_fail +
@@ -331,9 +333,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
do {
start =
- u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
- } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index d3e3ac242bfc..5b40f9c53196 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -12,6 +12,8 @@
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
#include <net/sch_generic.h>
#include "gve.h"
#include "gve_dqo.h"
@@ -30,6 +32,49 @@
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
+static int gve_verify_driver_compatibility(struct gve_priv *priv)
+{
+ int err;
+ struct gve_driver_info *driver_info;
+ dma_addr_t driver_info_bus;
+
+ driver_info = dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct gve_driver_info),
+ &driver_info_bus, GFP_KERNEL);
+ if (!driver_info)
+ return -ENOMEM;
+
+ *driver_info = (struct gve_driver_info) {
+ .os_type = 1, /* Linux */
+ .os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
+ .os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
+ .os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
+ .driver_capability_flags = {
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
+ },
+ };
+ strscpy(driver_info->os_version_str1, utsname()->release,
+ sizeof(driver_info->os_version_str1));
+ strscpy(driver_info->os_version_str2, utsname()->version,
+ sizeof(driver_info->os_version_str2));
+
+ err = gve_adminq_verify_driver_compatibility(priv,
+ sizeof(struct gve_driver_info),
+ driver_info_bus);
+
+ /* It's ok if the device doesn't support this */
+ if (err == -EOPNOTSUPP)
+ err = 0;
+
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_driver_info),
+ driver_info, driver_info_bus);
+ return err;
+}
+
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
@@ -51,10 +96,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
packets = priv->rx[ring].rpackets;
bytes = priv->rx[ring].rbytes;
- } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
s->rx_packets += packets;
s->rx_bytes += bytes;
@@ -64,10 +109,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
packets = priv->tx[ring].pkt_done;
bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
@@ -1273,9 +1318,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
do {
- start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
+ start = u64_stats_fetch_begin(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
- } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
+ } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_WAKE_CNT),
.value = cpu_to_be64(priv->tx[idx].wake_queue),
@@ -1368,6 +1413,13 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
return err;
}
+ err = gve_verify_driver_compatibility(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Could not verify driver compatibility: err=%d\n", err);
+ goto err;
+ }
+
if (skip_describe_device)
goto setup_device;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 021bbf308d68..1f55137722b0 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -35,6 +35,12 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
+
+ for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
+ page_ref_sub(rx->qpl_copy_pool[i].page,
+ rx->qpl_copy_pool[i].pagecnt_bias - 1);
+ put_page(rx->qpl_copy_pool[i].page);
+ }
}
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
@@ -63,6 +69,10 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus);
rx->data.data_ring = NULL;
+
+ kvfree(rx->qpl_copy_pool);
+ rx->qpl_copy_pool = NULL;
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
@@ -101,6 +111,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
u32 slots;
int err;
int i;
+ int j;
/* Allocate one page per Rx queue slot. Each page is split into two
* packet buffers, when possible we "page flip" between the two.
@@ -135,7 +146,33 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
goto alloc_err;
}
+ if (!rx->data.raw_addressing) {
+ for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
+ struct page *page = alloc_page(GFP_KERNEL);
+
+ if (!page) {
+ err = -ENOMEM;
+ goto alloc_err_qpl;
+ }
+
+ rx->qpl_copy_pool[j].page = page;
+ rx->qpl_copy_pool[j].page_offset = 0;
+ rx->qpl_copy_pool[j].page_address = page_address(page);
+
+ /* The page already has 1 ref. */
+ page_ref_add(page, INT_MAX - 1);
+ rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
+ }
+ }
+
return slots;
+
+alloc_err_qpl:
+ while (j--) {
+ page_ref_sub(rx->qpl_copy_pool[j].page,
+ rx->qpl_copy_pool[j].pagecnt_bias - 1);
+ put_page(rx->qpl_copy_pool[j].page);
+ }
alloc_err:
while (i--)
gve_rx_free_buffer(&priv->pdev->dev,
@@ -146,12 +183,11 @@ alloc_err:
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
- ctx->curr_frag_cnt = 0;
- ctx->total_expected_size = 0;
- ctx->expected_frag_cnt = 0;
ctx->skb_head = NULL;
ctx->skb_tail = NULL;
- ctx->reuse_frags = false;
+ ctx->total_size = 0;
+ ctx->frag_cnt = 0;
+ ctx->drop_pkt = false;
}
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
@@ -181,10 +217,22 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
GFP_KERNEL);
if (!rx->data.data_ring)
return -ENOMEM;
+
+ rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
+ rx->qpl_copy_pool_head = 0;
+ rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
+ sizeof(rx->qpl_copy_pool[0]),
+ GFP_KERNEL);
+
+ if (!rx->qpl_copy_pool) {
+ err = -ENOMEM;
+ goto abort_with_slots;
+ }
+
filled_pages = gve_prefill_rx_pages(rx);
if (filled_pages < 0) {
err = -ENOMEM;
- goto abort_with_slots;
+ goto abort_with_copy_pool;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -236,6 +284,9 @@ abort_with_q_resources:
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx);
+abort_with_copy_pool:
+ kvfree(rx->qpl_copy_pool);
+ rx->qpl_copy_pool = NULL;
abort_with_slots:
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
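The new qpl_copy_pool is sized to twice the ring's slot count and indexed with qpl_copy_pool_head & qpl_copy_pool_mask, the usual power-of-two ring trick: the head counter only ever increments and the mask supplies the wrap. A tiny model of that indexing, with an assumed (power-of-two) slot count:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t slots = 4;			/* assumed ring slot count */
	uint32_t mask = slots * 2 - 1;		/* pool size must stay a power of two */
	uint32_t head = 0;

	for (int i = 0; i < 12; i++) {
		printf("head=%2u -> pool index %u\n", head, head & mask);
		head++;				/* never reset; the mask handles wrap */
	}
	return 0;
}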
@@ -292,30 +343,47 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
-static u16 gve_rx_ctx_padding(struct gve_rx_ctx *ctx)
-{
- return (ctx->curr_frag_cnt == 0) ? GVE_RX_PAD : 0;
-}
-
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 packet_buffer_size, u16 len,
struct gve_rx_ctx *ctx)
{
- u32 offset = page_info->page_offset + gve_rx_ctx_padding(ctx);
- struct sk_buff *skb;
+ u32 offset = page_info->page_offset + page_info->pad;
+ struct sk_buff *skb = ctx->skb_tail;
+ int num_frags = 0;
- if (!ctx->skb_head)
- ctx->skb_head = napi_get_frags(napi);
+ if (!skb) {
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb))
+ return NULL;
- if (unlikely(!ctx->skb_head))
- return NULL;
+ ctx->skb_head = skb;
+ ctx->skb_tail = skb;
+ } else {
+ num_frags = skb_shinfo(ctx->skb_tail)->nr_frags;
+ if (num_frags == MAX_SKB_FRAGS) {
+ skb = napi_alloc_skb(napi, 0);
+ if (!skb)
+ return NULL;
- skb = ctx->skb_head;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ // We will never chain more than two SKBs: 2 * 16 * 2k > 64k
+ // which is why we do not need to chain by using skb->next
+ skb_shinfo(ctx->skb_tail)->frag_list = skb;
+
+ ctx->skb_tail = skb;
+ num_frags = 0;
+ }
+ }
+
+ if (skb != ctx->skb_head) {
+ ctx->skb_head->len += len;
+ ctx->skb_head->data_len += len;
+ ctx->skb_head->truesize += packet_buffer_size;
+ }
+ skb_add_rx_frag(skb, num_frags, page_info->page,
offset, len, packet_buffer_size);
- return skb;
+ return ctx->skb_head;
}
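The new chaining branch in gve_rx_add_frags() leans on the arithmetic in its comment: with MAX_SKB_FRAGS fragments of one packet buffer each per SKB, a head SKB plus a single frag_list SKB already covers the largest packet GRO will deliver, so no deeper chain is ever needed. A quick check of that bound, using assumed constants (16 frags, 2 KiB buffers, 64 KiB maximum packet):

#include <stdio.h>

int main(void)
{
	const int max_skb_frags = 16;		/* assumed MAX_SKB_FRAGS */
	const int buf_size = 2048;		/* assumed Rx packet buffer size */
	const int max_pkt = 64 * 1024;
	int per_skb = max_skb_frags * buf_size;	/* 32 KiB of payload per SKB */

	printf("two chained SKBs hold %d bytes, max packet is %d -> %s\n",
	       2 * per_skb, max_pkt,
	       2 * per_skb >= max_pkt ? "no third SKB needed" : "need more");
	return 0;
}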
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
@@ -363,6 +431,92 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
return skb;
}
+static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ u16 len, struct napi_struct *napi)
+{
+ u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
+ void *src = page_info->page_address + page_info->page_offset;
+ struct gve_rx_slot_page_info *copy_page_info;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ bool alloc_page = false;
+ struct sk_buff *skb;
+ void *dst;
+
+ copy_page_info = &rx->qpl_copy_pool[pool_idx];
+ if (!copy_page_info->can_flip) {
+ int recycle = gve_rx_can_recycle_buffer(copy_page_info);
+
+ if (unlikely(recycle < 0)) {
+ gve_schedule_reset(rx->gve);
+ return NULL;
+ }
+ alloc_page = !recycle;
+ }
+
+ if (alloc_page) {
+ struct gve_rx_slot_page_info alloc_page_info;
+ struct page *page;
+
+ /* The least recently used page turned out to be
+ * still in use by the kernel. Ignoring it and moving
+ * on alleviates head-of-line blocking.
+ */
+ rx->qpl_copy_pool_head++;
+
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return NULL;
+
+ alloc_page_info.page = page;
+ alloc_page_info.page_offset = 0;
+ alloc_page_info.page_address = page_address(page);
+ alloc_page_info.pad = page_info->pad;
+
+ memcpy(alloc_page_info.page_address, src, page_info->pad + len);
+ skb = gve_rx_add_frags(napi, &alloc_page_info,
+ rx->packet_buffer_size,
+ len, ctx);
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_copy_cnt++;
+ rx->rx_frag_alloc_cnt++;
+ u64_stats_update_end(&rx->statss);
+
+ return skb;
+ }
+
+ dst = copy_page_info->page_address + copy_page_info->page_offset;
+ memcpy(dst, src, page_info->pad + len);
+ copy_page_info->pad = page_info->pad;
+
+ skb = gve_rx_add_frags(napi, copy_page_info,
+ rx->packet_buffer_size, len, ctx);
+ if (unlikely(!skb))
+ return NULL;
+
+ gve_dec_pagecnt_bias(copy_page_info);
+ copy_page_info->page_offset += rx->packet_buffer_size;
+ copy_page_info->page_offset &= (PAGE_SIZE - 1);
+
+ if (copy_page_info->can_flip) {
+ /* We have used both halves of this copy page, it
+ * is time for it to go to the back of the queue.
+ */
+ copy_page_info->can_flip = false;
+ rx->qpl_copy_pool_head++;
+ prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page);
+ } else {
+ copy_page_info->can_flip = true;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_copy_cnt++;
+ u64_stats_update_end(&rx->statss);
+
+ return skb;
+}
+
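gve_rx_copy_to_pool() decides whether a pool page can be reused by comparing the page's reference count against the bias the driver banked when it took a large batch of references up front (the INT_MAX trick in gve_prefill_rx_pages()). A simplified model of that bookkeeping, with plain integers standing in for struct page refcounts:

#include <stdio.h>

struct demo_page_info {
	int page_refcount;	/* stand-in for page_count(page) */
	int pagecnt_bias;	/* references still owned by the driver */
};

/* Roughly what a can-recycle check boils down to in this sketch: if the
 * stack has dropped every reference it was handed, only the driver's bias
 * remains and the buffer can be reused immediately.
 */
static int can_recycle(const struct demo_page_info *pi)
{
	return pi->page_refcount == pi->pagecnt_bias;
}

int main(void)
{
	struct demo_page_info pi = { .page_refcount = 100, .pagecnt_bias = 100 };

	printf("idle page: recycle=%d\n", can_recycle(&pi));

	pi.pagecnt_bias--;	/* driver handed one reference to an SKB */
	printf("in-flight page: recycle=%d\n", can_recycle(&pi));

	pi.page_refcount--;	/* the stack freed that SKB */
	printf("returned page: recycle=%d\n", can_recycle(&pi));
	return 0;
}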
static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
@@ -377,7 +531,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* choice is to copy the data out of it so that we can return it to the
* device.
*/
- if (ctx->reuse_frags) {
+ if (page_info->can_flip) {
skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
@@ -386,116 +540,23 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
- const u16 padding = gve_rx_ctx_padding(ctx);
-
- skb = gve_rx_copy(netdev, napi, page_info, len, padding, ctx);
- if (skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_frag_copy_cnt++;
- u64_stats_update_end(&rx->statss);
- }
+ skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
}
return skb;
}
-#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
-static u16 gve_rx_get_fragment_size(struct gve_rx_ctx *ctx, struct gve_rx_desc *desc)
-{
- return be16_to_cpu(desc->len) - gve_rx_ctx_padding(ctx);
-}
-
-static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
-{
- bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false;
- bool buffer_error = false, desc_error = false, seqno_error = false;
- struct gve_rx_slot_page_info *page_info;
- struct gve_priv *priv = rx->gve;
- u32 idx = rx->cnt & rx->mask;
- bool reuse_frags, can_flip;
- struct gve_rx_desc *desc;
- u16 packet_size = 0;
- u16 n_frags = 0;
- int recycle;
-
- /** In QPL mode, we only flip buffers when all buffers containing the packet
- * can be flipped. RDA can_flip decisions will be made later, per frag.
- */
- can_flip = qpl_mode;
- reuse_frags = can_flip;
- do {
- u16 frag_size;
-
- n_frags++;
- desc = &rx->desc.desc_ring[idx];
- desc_error = unlikely(desc->flags_seq & GVE_RXF_ERR) || desc_error;
- if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) {
- seqno_error = true;
- netdev_warn(priv->dev,
- "RX seqno error: want=%d, got=%d, dropping packet and scheduling reset.",
- rx->desc.seqno, GVE_SEQNO(desc->flags_seq));
- }
- frag_size = be16_to_cpu(desc->len);
- packet_size += frag_size;
- if (frag_size > rx->packet_buffer_size) {
- packet_size_error = true;
- netdev_warn(priv->dev,
- "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
- rx->packet_buffer_size, be16_to_cpu(desc->len));
- }
- page_info = &rx->data.page_info[idx];
- if (can_flip) {
- recycle = gve_rx_can_recycle_buffer(page_info);
- reuse_frags = reuse_frags && recycle > 0;
- buffer_error = buffer_error || unlikely(recycle < 0);
- }
- idx = (idx + 1) & rx->mask;
- rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
- } while (GVE_PKTCONT_BIT_IS_SET(desc->flags_seq));
-
- prefetch(rx->desc.desc_ring + idx);
-
- ctx->curr_frag_cnt = 0;
- ctx->total_expected_size = packet_size - GVE_RX_PAD;
- ctx->expected_frag_cnt = n_frags;
- ctx->skb_head = NULL;
- ctx->reuse_frags = reuse_frags;
-
- if (ctx->expected_frag_cnt > 1) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_cont_packet_cnt++;
- u64_stats_update_end(&rx->statss);
- }
- if (ctx->total_expected_size > priv->rx_copybreak && !ctx->reuse_frags && qpl_mode) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
- u64_stats_update_end(&rx->statss);
- }
-
- if (unlikely(buffer_error || seqno_error || packet_size_error)) {
- gve_schedule_reset(priv);
- return false;
- }
-
- if (unlikely(desc_error)) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_desc_err_dropped_pkt++;
- u64_stats_update_end(&rx->statss);
- return false;
- }
- return true;
-}
-
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
- u16 len, union gve_rx_data_slot *data_slot)
+ u16 len, union gve_rx_data_slot *data_slot,
+ bool is_only_frag)
{
struct net_device *netdev = priv->dev;
struct gve_rx_ctx *ctx = &rx->ctx;
struct sk_buff *skb = NULL;
- if (len <= priv->rx_copybreak && ctx->expected_frag_cnt == 1) {
+ if (len <= priv->rx_copybreak && is_only_frag) {
/* Just copy small packets */
- skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD, ctx);
+ skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
if (skb) {
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
@@ -504,29 +565,25 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
u64_stats_update_end(&rx->statss);
}
} else {
- if (rx->data.raw_addressing) {
- int recycle = gve_rx_can_recycle_buffer(page_info);
+ int recycle = gve_rx_can_recycle_buffer(page_info);
- if (unlikely(recycle < 0)) {
- gve_schedule_reset(priv);
- return NULL;
- }
- page_info->can_flip = recycle;
- if (page_info->can_flip) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_frag_flip_cnt++;
- u64_stats_update_end(&rx->statss);
- }
+ if (unlikely(recycle < 0)) {
+ gve_schedule_reset(priv);
+ return NULL;
+ }
+ page_info->can_flip = recycle;
+ if (page_info->can_flip) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_flip_cnt++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ if (rx->data.raw_addressing) {
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
rx->packet_buffer_size, ctx);
} else {
- if (ctx->reuse_frags) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_frag_flip_cnt++;
- u64_stats_update_end(&rx->statss);
- }
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
}
@@ -534,101 +591,113 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
return skb;
}
-static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
- u64 *packet_size_bytes, u32 *work_done)
+#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
+ struct gve_rx_desc *desc, u32 idx,
+ struct gve_rx_cnts *cnts)
{
+ bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq);
struct gve_rx_slot_page_info *page_info;
+ u16 frag_size = be16_to_cpu(desc->len);
struct gve_rx_ctx *ctx = &rx->ctx;
union gve_rx_data_slot *data_slot;
struct gve_priv *priv = rx->gve;
- struct gve_rx_desc *first_desc;
struct sk_buff *skb = NULL;
- struct gve_rx_desc *desc;
- struct napi_struct *napi;
dma_addr_t page_bus;
- u32 work_cnt = 0;
void *va;
- u32 idx;
- u16 len;
- idx = rx->cnt & rx->mask;
- first_desc = &rx->desc.desc_ring[idx];
- desc = first_desc;
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ bool is_first_frag = ctx->frag_cnt == 0;
- if (unlikely(!gve_rx_ctx_init(ctx, rx)))
- goto skb_alloc_fail;
+ bool is_only_frag = is_first_frag && is_last_frag;
- while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
- /* Prefetch two packet buffers ahead, we will need it soon. */
- page_info = &rx->data.page_info[(idx + 2) & rx->mask];
- va = page_info->page_address + page_info->page_offset;
+ if (unlikely(ctx->drop_pkt))
+ goto finish_frag;
- prefetch(page_info->page); /* Kernel page struct. */
- prefetch(va); /* Packet header. */
- prefetch(va + 64); /* Next cacheline too. */
+ if (desc->flags_seq & GVE_RXF_ERR) {
+ ctx->drop_pkt = true;
+ cnts->desc_err_pkt_cnt++;
+ napi_free_frags(napi);
+ goto finish_frag;
+ }
- len = gve_rx_get_fragment_size(ctx, desc);
+ if (unlikely(frag_size > rx->packet_buffer_size)) {
+ netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset",
+ frag_size, rx->packet_buffer_size);
+ ctx->drop_pkt = true;
+ napi_free_frags(napi);
+ gve_schedule_reset(rx->gve);
+ goto finish_frag;
+ }
- page_info = &rx->data.page_info[idx];
- data_slot = &rx->data.data_ring[idx];
- page_bus = rx->data.raw_addressing ?
- be64_to_cpu(data_slot->addr) - page_info->page_offset :
- rx->data.qpl->page_buses[idx];
- dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);
-
- skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- goto skb_alloc_fail;
+ /* Prefetch two packet buffers ahead, we will need it soon. */
+ page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+ va = page_info->page_address + page_info->page_offset;
+ prefetch(page_info->page); /* Kernel page struct. */
+ prefetch(va); /* Packet header. */
+ prefetch(va + 64); /* Next cacheline too. */
+
+ page_info = &rx->data.page_info[idx];
+ data_slot = &rx->data.data_ring[idx];
+ page_bus = (rx->data.raw_addressing) ?
+ be64_to_cpu(data_slot->addr) - page_info->page_offset :
+ rx->data.qpl->page_buses[idx];
+ dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
+ frag_size -= page_info->pad;
+
+ skb = gve_rx_skb(priv, rx, page_info, napi, frag_size,
+ data_slot, is_only_frag);
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+
+ napi_free_frags(napi);
+ ctx->drop_pkt = true;
+ goto finish_frag;
+ }
+ ctx->total_size += frag_size;
+
+ if (is_first_frag) {
+ if (likely(feat & NETIF_F_RXCSUM)) {
+ /* NIC passes up the partial sum */
+ if (desc->csum)
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum = csum_unfold(desc->csum);
}
- ctx->curr_frag_cnt++;
- rx->cnt++;
- idx = rx->cnt & rx->mask;
- work_cnt++;
- desc = &rx->desc.desc_ring[idx];
+ /* parse flags & pass relevant info up */
+ if (likely(feat & NETIF_F_RXHASH) &&
+ gve_needs_rss(desc->flags_seq))
+ skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
+ gve_rss_type(desc->flags_seq));
}
- if (likely(feat & NETIF_F_RXCSUM)) {
- /* NIC passes up the partial sum */
- if (first_desc->csum)
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (is_last_frag) {
+ skb_record_rx_queue(skb, rx->q_num);
+ if (skb_is_nonlinear(skb))
+ napi_gro_frags(napi);
else
- skb->ip_summed = CHECKSUM_NONE;
- skb->csum = csum_unfold(first_desc->csum);
+ napi_gro_receive(napi, skb);
+ goto finish_ok_pkt;
}
- /* parse flags & pass relevant info up */
- if (likely(feat & NETIF_F_RXHASH) &&
- gve_needs_rss(first_desc->flags_seq))
- skb_set_hash(skb, be32_to_cpu(first_desc->rss_hash),
- gve_rss_type(first_desc->flags_seq));
-
- *packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
- *work_done = work_cnt;
- skb_record_rx_queue(skb, rx->q_num);
- if (skb_is_nonlinear(skb))
- napi_gro_frags(napi);
- else
- napi_gro_receive(napi, skb);
-
- gve_rx_ctx_clear(ctx);
- return true;
-
-skb_alloc_fail:
- if (napi->skb)
- napi_free_frags(napi);
- *packet_size_bytes = 0;
- *work_done = ctx->expected_frag_cnt;
- while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
- rx->cnt++;
- ctx->curr_frag_cnt++;
+ goto finish_frag;
+
+finish_ok_pkt:
+ cnts->ok_pkt_bytes += ctx->total_size;
+ cnts->ok_pkt_cnt++;
+finish_frag:
+ ctx->frag_cnt++;
+ if (is_last_frag) {
+ cnts->total_pkt_cnt++;
+ cnts->cont_pkt_cnt += (ctx->frag_cnt > 1);
+ gve_rx_ctx_clear(ctx);
}
- gve_rx_ctx_clear(ctx);
- return false;
}
bool gve_rx_work_pending(struct gve_rx_ring *rx)
@@ -704,36 +773,39 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
netdev_features_t feat)
{
- u32 work_done = 0, total_packet_cnt = 0, ok_packet_cnt = 0;
+ struct gve_rx_ctx *ctx = &rx->ctx;
struct gve_priv *priv = rx->gve;
+ struct gve_rx_cnts cnts = {0};
+ struct gve_rx_desc *next_desc;
u32 idx = rx->cnt & rx->mask;
- struct gve_rx_desc *desc;
- u64 bytes = 0;
+ u32 work_done = 0;
+
+ struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];
- desc = &rx->desc.desc_ring[idx];
+ // Exceed budget only if (and till) the inflight packet is consumed.
while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
- work_done < budget) {
- u64 packet_size_bytes = 0;
- u32 work_cnt = 0;
- bool dropped;
-
- netif_info(priv, rx_status, priv->dev,
- "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
- rx->q_num, idx, desc, desc->flags_seq);
- netif_info(priv, rx_status, priv->dev,
- "[%d] seqno=%d rx->desc.seqno=%d\n",
- rx->q_num, GVE_SEQNO(desc->flags_seq),
- rx->desc.seqno);
-
- dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt);
- if (!dropped) {
- bytes += packet_size_bytes;
- ok_packet_cnt++;
- }
- total_packet_cnt++;
+ (work_done < budget || ctx->frag_cnt)) {
+ next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
+ prefetch(next_desc);
+
+ gve_rx(rx, feat, desc, idx, &cnts);
+
+ rx->cnt++;
idx = rx->cnt & rx->mask;
desc = &rx->desc.desc_ring[idx];
- work_done += work_cnt;
+ rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
+ work_done++;
+ }
+
+ // The device will only send whole packets.
+ if (unlikely(ctx->frag_cnt)) {
+ struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+
+ napi_free_frags(napi);
+ gve_rx_ctx_clear(&rx->ctx);
+ netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
+ GVE_SEQNO(desc->flags_seq), rx->desc.seqno);
+ gve_schedule_reset(rx->gve);
}
if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
@@ -741,8 +813,10 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
if (work_done) {
u64_stats_update_begin(&rx->statss);
- rx->rpackets += ok_packet_cnt;
- rx->rbytes += bytes;
+ rx->rpackets += cnts.ok_pkt_cnt;
+ rx->rbytes += cnts.ok_pkt_bytes;
+ rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt;
+ rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt;
u64_stats_update_end(&rx->statss);
}
@@ -767,7 +841,7 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
}
gve_rx_write_doorbell(priv, rx);
- return total_packet_cnt;
+ return cnts.total_pkt_cnt;
}
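The rewritten gve_clean_rx_done() now walks one descriptor per loop iteration and stops as soon as the descriptor's sequence stamp no longer matches the value the driver expects, which is how it detects that the device has written nothing newer; it also deliberately runs past the NAPI budget while a multi-fragment packet is still in flight. A toy model of that seqno handshake, assuming a small 1..7 wrap rather than the real 3-bit flags_seq encoding:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	int ring[RING_SIZE] = {0};		/* 0 == slot not yet written */
	int dev_seq = 1, host_seq = 1, head = 0;

	for (int i = 0; i < 5; i++) {		/* device completes 5 slots */
		ring[i] = dev_seq;
		dev_seq = dev_seq % 7 + 1;	/* assumed 1..7 wrap, never 0 */
	}

	while (ring[head & (RING_SIZE - 1)] == host_seq) {
		printf("process descriptor %d (seq %d)\n", head, host_seq);
		head++;
		host_seq = host_seq % 7 + 1;
	}
	printf("stopped: slot %d has seq %d, expected %d\n",
	       head, ring[head & (RING_SIZE - 1)], host_seq);
	return 0;
}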
int gve_rx_poll(struct gve_notify_block *block, int budget)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 2e6461b0ea8b..630f42a3037b 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
- &buf_state->page_info, buf_len, 0, NULL);
+ &buf_state->page_info, buf_len, 0);
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 588d64819ed5..b76143bfd594 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -953,12 +953,18 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
} else if (type == GVE_COMPL_TYPE_DQO_PKT) {
u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
-
- gve_handle_packet_completion(priv, tx, !!napi,
- compl_tag,
- &pkt_compl_bytes,
- &pkt_compl_pkts,
- /*is_reinjection=*/false);
+ if (compl_tag & GVE_ALT_MISS_COMPL_BIT) {
+ compl_tag &= ~GVE_ALT_MISS_COMPL_BIT;
+ gve_handle_miss_completion(priv, tx, compl_tag,
+ &miss_compl_bytes,
+ &miss_compl_pkts);
+ } else {
+ gve_handle_packet_completion(priv, tx, !!napi,
+ compl_tag,
+ &pkt_compl_bytes,
+ &pkt_compl_pkts,
+ false);
+ }
} else if (type == GVE_COMPL_TYPE_DQO_MISS) {
u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
@@ -972,7 +978,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
compl_tag,
&reinject_compl_bytes,
&reinject_compl_pkts,
- /*is_reinjection=*/true);
+ true);
}
tx->dqo_compl.head =
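The DQO completion change folds miss completions into the packet-completion path by stealing a bit of the 16-bit completion tag: when GVE_ALT_MISS_COMPL_BIT is set, the entry is handled as a miss completion after the bit is masked off. A minimal sketch of that demultiplexing, using a placeholder bit position rather than the real constant:

#include <stdint.h>
#include <stdio.h>

#define DEMO_ALT_MISS_COMPL_BIT (1u << 15)	/* assumed flag position */

static void handle_completion(uint16_t compl_tag)
{
	if (compl_tag & DEMO_ALT_MISS_COMPL_BIT) {
		compl_tag &= (uint16_t)~DEMO_ALT_MISS_COMPL_BIT;
		printf("miss completion for tag %u\n", compl_tag);
	} else {
		printf("packet completion for tag %u\n", compl_tag);
	}
}

int main(void)
{
	handle_completion(7);
	handle_completion(7 | DEMO_ALT_MISS_COMPL_BIT);
	return 0;
}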
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index d57508bc4307..6ba46adaaee3 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -50,34 +50,18 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len,
- u16 padding, struct gve_rx_ctx *ctx)
+ u16 padding)
{
void *va = page_info->page_address + padding + page_info->page_offset;
- int skb_linear_offset = 0;
- bool set_protocol = false;
struct sk_buff *skb;
- if (ctx) {
- if (!ctx->skb_head)
- ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
-
- if (unlikely(!ctx->skb_head))
- return NULL;
- skb = ctx->skb_head;
- skb_linear_offset = skb->len;
- set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
- } else {
- skb = napi_alloc_skb(napi, len);
-
- if (unlikely(!skb))
- return NULL;
- set_protocol = true;
- }
- __skb_put(skb, len);
- skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
+ skb = napi_alloc_skb(napi, len);
+ if (unlikely(!skb))
+ return NULL;
- if (set_protocol)
- skb->protocol = eth_type_trans(skb, dev);
+ __skb_put(skb, len);
+ skb_copy_to_linear_data_offset(skb, 0, va, len);
+ skb->protocol = eth_type_trans(skb, dev);
return skb;
}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 6d98e69fd3b8..79595940b351 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len,
- u16 pad, struct gve_rx_ctx *ctx);
+ u16 pad);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 430eccea8e5e..9b26f0f2c748 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -424,8 +424,6 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
return ret;
}
- __module_get(THIS_MODULE);
-
INIT_LIST_HEAD(&hdev->handle_list);
spin_lock_init(&hdev->lock);
@@ -445,7 +443,6 @@ EXPORT_SYMBOL(hnae_ae_register);
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
device_unregister(&hdev->cls_dev);
- module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 028577943ec5..0ec5730b1788 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2488,7 +2488,7 @@ static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
if (is_tx) {
stats->tx_bytes += ring->stats.tx_bytes;
stats->tx_packets += ring->stats.tx_pkts;
@@ -2522,7 +2522,7 @@ static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
stats->multicast += ring->stats.rx_multicast;
stats->rx_length_errors += ring->stats.err_pkt_len;
}
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
}
static void hns3_nic_get_stats64(struct net_device *netdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 4c441e6a5082..3d3b69605423 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -13,11 +13,6 @@ static int hclge_devlink_info_get(struct devlink *devlink,
struct hclge_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
struct hclge_dev *hdev = priv->hdev;
- int ret;
-
- ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (ret)
- return ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index a40b1583f114..80a2a0073d97 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -22,28 +22,16 @@ static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
return 0;
}
-static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int hclge_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
- u64 adj_val, adj_base, diff;
+ u64 adj_val, adj_base;
unsigned long flags;
- bool is_neg = false;
u32 quo, numerator;
- if (ppb < 0) {
- ppb = -ppb;
- is_neg = true;
- }
-
adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
- adj_val = adj_base * ppb;
- diff = div_u64(adj_val, 1000000000ULL);
-
- if (is_neg)
- adj_val = adj_base - diff;
- else
- adj_val = adj_base + diff;
+ adj_val = adjust_by_scaled_ppm(adj_base, scaled_ppm);
/* This clock cycle is defined by three part: quotient, numerator
* and denominator. For example, 2.5ns, the quotient is 2,
@@ -446,7 +434,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
ptp->info.n_ext_ts = 0;
ptp->info.pps = 0;
- ptp->info.adjfreq = hclge_ptp_adjfreq;
+ ptp->info.adjfine = hclge_ptp_adjfine;
ptp->info.adjtime = hclge_ptp_adjtime;
ptp->info.gettimex64 = hclge_ptp_gettimex;
ptp->info.settime64 = hclge_ptp_settime;
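hclge_ptp_adjfine() replaces the hand-rolled sign handling of the old adjfreq callback with adjust_by_scaled_ppm(), whose input is parts per million scaled by 2^16 (so 65536 == 1 ppm). A back-of-envelope userspace model of that adjustment; it trades precision for simplicity compared with the kernel helper, and the nominal value below is just an assumption:

#include <stdint.h>
#include <stdio.h>

static uint64_t adjust_by_scaled_ppm_model(uint64_t base, long scaled_ppm)
{
	int neg = scaled_ppm < 0;
	uint64_t abs_ppm = neg ? -(uint64_t)scaled_ppm : (uint64_t)scaled_ppm;
	/* Divide first to avoid 64-bit overflow; the real helper keeps
	 * more fractional precision. */
	uint64_t diff = (base / 1000000) * abs_ppm >> 16;

	return neg ? base - diff : base + diff;
}

int main(void)
{
	uint64_t base = 1000000000ULL;	/* assumed nominal cycle value */
	long scaled_ppm = 32768;	/* +0.5 ppm, since 65536 == 1 ppm */

	printf("adjusted = %llu\n",
	       (unsigned long long)adjust_by_scaled_ppm_model(base, scaled_ppm));
	return 0;
}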
@@ -504,7 +492,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
goto out;
set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
- ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0);
+ ret = hclge_ptp_adjfine(&hdev->ptp->info, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index fdc19868b818..a6c3c5e8f0ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -13,11 +13,6 @@ static int hclgevf_devlink_info_get(struct devlink *devlink,
struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN];
struct hclgevf_dev *hdev = priv->hdev;
- int ret;
-
- ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (ret)
- return ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index a4fbf44f944c..52ea97c818b8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -22,6 +22,10 @@
#define LP_PKT_CNT 64
+#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
+#define HINIC_MAX_MTU_SIZE (HINIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN)
+#define HINIC_MIN_MTU_SIZE 256
+
enum hinic_flags {
HINIC_LINK_UP = BIT(0),
HINIC_INTF_UP = BIT(1),
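The new hinic MTU bounds derive the maximum MTU from the largest jumbo frame the hardware accepts minus the Ethernet header and FCS, replacing the per-call range check that used to live in hinic_port_set_mtu(). The arithmetic, spelled out with the standard header sizes:

#include <stdio.h>

int main(void)
{
	const int max_jumbo_frame = 15872;	/* HINIC_MAX_JUMBO_FRAME_SIZE */
	const int eth_hlen = 14;		/* ETH_HLEN */
	const int eth_fcs_len = 4;		/* ETH_FCS_LEN */

	/* netdev->max_mtu: the payload that still fits in one jumbo frame. */
	printf("max MTU = %d - %d - %d = %d\n", max_jumbo_frame, eth_hlen,
	       eth_fcs_len, max_jumbo_frame - eth_hlen - eth_fcs_len);
	return 0;
}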
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index d2d89b0a5ef0..6b5797e69781 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -46,104 +46,170 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_VF_REGISTER = 0x0,
HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
- HINIC_PORT_CMD_CHANGE_MTU = 2,
+ HINIC_PORT_CMD_CHANGE_MTU = 0x2,
- HINIC_PORT_CMD_ADD_VLAN = 3,
- HINIC_PORT_CMD_DEL_VLAN = 4,
+ HINIC_PORT_CMD_ADD_VLAN = 0x3,
+ HINIC_PORT_CMD_DEL_VLAN = 0x4,
- HINIC_PORT_CMD_SET_PFC = 5,
+ HINIC_PORT_CMD_SET_ETS = 0x7,
+ HINIC_PORT_CMD_GET_ETS = 0x8,
- HINIC_PORT_CMD_SET_MAC = 9,
- HINIC_PORT_CMD_GET_MAC = 10,
- HINIC_PORT_CMD_DEL_MAC = 11,
+ HINIC_PORT_CMD_SET_PFC = 0x5,
- HINIC_PORT_CMD_SET_RX_MODE = 12,
+ HINIC_PORT_CMD_SET_MAC = 0x9,
+ HINIC_PORT_CMD_GET_MAC = 0xA,
+ HINIC_PORT_CMD_DEL_MAC = 0xB,
- HINIC_PORT_CMD_GET_PAUSE_INFO = 20,
- HINIC_PORT_CMD_SET_PAUSE_INFO = 21,
+ HINIC_PORT_CMD_SET_RX_MODE = 0xC,
- HINIC_PORT_CMD_GET_LINK_STATE = 24,
+ HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xD,
- HINIC_PORT_CMD_SET_LRO = 25,
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14,
+ HINIC_PORT_CMD_SET_PAUSE_INFO = 0x15,
- HINIC_PORT_CMD_SET_RX_CSUM = 26,
+ HINIC_PORT_CMD_GET_LINK_STATE = 0x18,
- HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 27,
+ HINIC_PORT_CMD_SET_LRO = 0x19,
- HINIC_PORT_CMD_GET_PORT_STATISTICS = 28,
+ HINIC_PORT_CMD_SET_RX_CSUM = 0x1A,
- HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 29,
+ HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1B,
- HINIC_PORT_CMD_GET_VPORT_STAT = 30,
+ HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1C,
- HINIC_PORT_CMD_CLEAN_VPORT_STAT = 31,
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 0x1D,
- HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 37,
+ HINIC_PORT_CMD_GET_VPORT_STAT = 0x1E,
- HINIC_PORT_CMD_SET_PORT_STATE = 41,
+ HINIC_PORT_CMD_CLEAN_VPORT_STAT = 0x1F,
- HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 43,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25,
- HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 44,
+ HINIC_PORT_CMD_SET_PORT_STATE = 0x29,
+ HINIC_PORT_CMD_GET_PORT_STATE = 0x30,
- HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 45,
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2B,
- HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 46,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 0x2C,
- HINIC_PORT_CMD_GET_RSS_CTX_TBL = 47,
+ HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 0x2D,
- HINIC_PORT_CMD_SET_RSS_CTX_TBL = 48,
+ HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 0x2E,
- HINIC_PORT_CMD_RSS_TEMP_MGR = 49,
+ HINIC_PORT_CMD_GET_RSS_CTX_TBL = 0x2F,
- HINIC_PORT_CMD_RD_LINE_TBL = 57,
+ HINIC_PORT_CMD_SET_RSS_CTX_TBL = 0x30,
- HINIC_PORT_CMD_RSS_CFG = 66,
+ HINIC_PORT_CMD_RSS_TEMP_MGR = 0x31,
- HINIC_PORT_CMD_FWCTXT_INIT = 69,
+ HINIC_PORT_CMD_RD_LINE_TBL = 0x39,
- HINIC_PORT_CMD_GET_LOOPBACK_MODE = 72,
- HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+ HINIC_PORT_CMD_RSS_CFG = 0x42,
- HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78,
+ HINIC_PORT_CMD_GET_PHY_TYPE = 0x44,
- HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
+ HINIC_PORT_CMD_FWCTXT_INIT = 0x45,
- HINIC_PORT_CMD_SET_FUNC_STATE = 93,
+ HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48,
+ HINIC_PORT_CMD_SET_LOOPBACK_MODE = 0x49,
- HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4A,
+ HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE = 0x4B,
- HINIC_PORT_CMD_SET_VF_RATE = 105,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4E,
- HINIC_PORT_CMD_SET_VF_VLAN = 106,
+ HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
- HINIC_PORT_CMD_CLR_VF_VLAN,
+ HINIC_PORT_CMD_GET_PORT_TYPE = 0x5B,
- HINIC_PORT_CMD_SET_TSO = 112,
+ HINIC_PORT_CMD_SET_FUNC_STATE = 0x5D,
- HINIC_PORT_CMD_UPDATE_FW = 114,
+ HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5E,
- HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115,
+ HINIC_PORT_CMD_GET_DMA_CS = 0x64,
+ HINIC_PORT_CMD_SET_DMA_CS = 0x65,
- HINIC_PORT_CMD_LINK_STATUS_REPORT = 160,
+ HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66,
- HINIC_PORT_CMD_UPDATE_MAC = 164,
+ HINIC_PORT_CMD_SET_VF_RATE = 0x69,
- HINIC_PORT_CMD_GET_CAP = 170,
+ HINIC_PORT_CMD_SET_VF_VLAN = 0x6A,
- HINIC_PORT_CMD_GET_LINK_MODE = 217,
+ HINIC_PORT_CMD_CLR_VF_VLAN = 0x6B,
- HINIC_PORT_CMD_SET_SPEED = 218,
+ HINIC_PORT_CMD_SET_TSO = 0x70,
- HINIC_PORT_CMD_SET_AUTONEG = 219,
+ HINIC_PORT_CMD_UPDATE_FW = 0x72,
- HINIC_PORT_CMD_GET_STD_SFP_INFO = 240,
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73,
- HINIC_PORT_CMD_SET_LRO_TIMER = 244,
+ HINIC_PORT_CMD_SET_PFC_THD = 0x75,
- HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249,
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xA0,
- HINIC_PORT_CMD_GET_SFP_ABS = 251,
+ HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xA3,
+
+ HINIC_PORT_CMD_UPDATE_MAC = 0xA4,
+
+ HINIC_PORT_CMD_GET_CAP = 0xAA,
+
+ HINIC_PORT_CMD_UP_TC_ADD_FLOW = 0xAF,
+ HINIC_PORT_CMD_UP_TC_DEL_FLOW = 0xB0,
+ HINIC_PORT_CMD_UP_TC_GET_FLOW = 0xB1,
+
+ HINIC_PORT_CMD_UP_TC_FLUSH_TCAM = 0xB2,
+
+ HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK = 0xB3,
+
+ HINIC_PORT_CMD_UP_TC_ENABLE = 0xB4,
+
+ HINIC_PORT_CMD_UP_TC_GET_TCAM_BLOCK = 0xB5,
+
+ HINIC_PORT_CMD_SET_IPSU_MAC = 0xCB,
+ HINIC_PORT_CMD_GET_IPSU_MAC = 0xCC,
+
+ HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4,
+
+ HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
+
+ HINIC_PORT_CMD_SET_SPEED = 0xDA,
+
+ HINIC_PORT_CMD_SET_AUTONEG = 0xDB,
+
+ HINIC_PORT_CMD_CLEAR_QP_RES = 0xDD,
+
+ HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE,
+
+ HINIC_PORT_CMD_SET_VF_COS = 0xDF,
+ HINIC_PORT_CMD_GET_VF_COS = 0xE1,
+
+ HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5,
+
+ HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6,
+
+ HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8,
+
+ HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB,
+
+ HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
+
+ HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3,
+
+ HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4,
+
+ HINIC_PORT_CMD_SET_VHD_CFG = 0xF7,
+
+ HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8,
+
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9,
+
+ HINIC_PORT_CMD_GET_SFP_ABS = 0xFB,
+
+ HINIC_PORT_CMD_Q_FILTER = 0xFC,
+
+ HINIC_PORT_CMD_TCAM_FILTER = 0xFE,
+
+ HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF,
};
/* cmd of mgmt CPU message for HILINK module */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 2d6906aba2a2..499c657d37a9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -1092,6 +1092,16 @@ static int set_features(struct hinic_dev *nic_dev,
}
}
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ ret = hinic_set_vlan_fliter(nic_dev,
+ !!(features &
+ NETIF_F_HW_VLAN_CTAG_FILTER));
+ if (ret) {
+ err = ret;
+ failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ }
+
if (err) {
nic_dev->netdev->features = features ^ failed_features;
return -EIO;
@@ -1187,7 +1197,8 @@ static int nic_dev_init(struct pci_dev *pdev)
else
netdev->netdev_ops = &hinicvf_netdev_ops;
- netdev->max_mtu = ETH_MAX_MTU;
+ netdev->max_mtu = HINIC_MAX_MTU_SIZE;
+ netdev->min_mtu = HINIC_MIN_MTU_SIZE;
nic_dev = netdev_priv(netdev);
nic_dev->netdev = netdev;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 28ae6f1201a8..9406237c461e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -17,9 +17,6 @@
#include "hinic_port.h"
#include "hinic_dev.h"
-#define HINIC_MIN_MTU_SIZE 256
-#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
-
enum mac_op {
MAC_DEL,
MAC_SET,
@@ -147,24 +144,12 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
**/
int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
{
- struct net_device *netdev = nic_dev->netdev;
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mtu_cmd port_mtu_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
u16 out_size = sizeof(port_mtu_cmd);
struct pci_dev *pdev = hwif->pdev;
- int err, max_frame;
-
- if (new_mtu < HINIC_MIN_MTU_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
- return -EINVAL;
- }
-
- max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size");
- return -EINVAL;
- }
+ int err;
port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
port_mtu_cmd.mtu = new_mtu;
@@ -462,6 +447,39 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
return 0;
}
+int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_vlan_filter vlan_filter;
+ u16 out_size = sizeof(vlan_filter);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ vlan_filter.enable = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
+ &vlan_filter, sizeof(vlan_filter),
+ &vlan_filter, &out_size);
+ if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ HINIC_IS_VF(hwif)) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || vlan_filter.status) {
+ dev_err(&pdev->dev,
+ "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_filter.status, out_size);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index c9ae3d4dc547..c8694ac7c702 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -351,6 +351,16 @@ struct hinic_vlan_cfg {
u8 rsvd1[5];
};
+struct hinic_vlan_filter {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 rsvd1[2];
+ u32 enable;
+};
+
struct hinic_rss_template_mgmt {
u8 status;
u8 version;
@@ -831,6 +841,8 @@ int hinic_get_vport_stats(struct hinic_dev *nic_dev,
int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
+int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en);
+
int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
int hinic_set_link_settings(struct hinic_hwdev *hwdev,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index d649c6e323c8..ceec8be2a73b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
}
/**
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index f7e05b41385b..ee357088d021 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -489,6 +489,24 @@ static struct vf_cmd_check_handle nic_cmd_support_vf[] = {
{HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B},
{HINIC_PORT_CMD_GET_CAP, hinic_mbox_check_func_id_8B},
{HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_VF_COS, NULL},
+ {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_Q_FILTER, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_TCAM_FILTER, NULL},
+ {HINIC_PORT_CMD_UP_TC_ADD_FLOW, NULL},
+ {HINIC_PORT_CMD_UP_TC_DEL_FLOW, NULL},
+ {HINIC_PORT_CMD_UP_TC_FLUSH_TCAM, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UP_TC_ENABLE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_CABLE_PLUG_EVENT, NULL},
+ {HINIC_PORT_CMD_LINK_ERR_EVENT, NULL},
+ {HINIC_PORT_CMD_SET_PORT_STATE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_ETS, NULL},
+ {HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE, NULL},
+ {HINIC_PORT_CMD_RESET_LINK_CFG, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_LINK_FOLLOW, NULL},
+ {HINIC_PORT_CMD_CLEAR_QP_RES, NULL},
};
#define CHECK_IPSU_15BIT 0X8000
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index e91476c8ff8b..ad47ac51a139 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
stats->bytes = txq_stats->bytes;
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
- } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
}
/**
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5b96cd94dcd2..113fcb3e353e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -690,8 +690,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
- if (!adapter->pool_config)
- netif_tx_stop_all_queues(netdev);
+ netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -799,9 +798,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (netif_running(dev)) {
restart = 1;
- adapter->pool_config = 1;
ibmveth_close(dev);
- adapter->pool_config = 0;
}
set_attr = 0;
@@ -883,9 +880,7 @@ static int ibmveth_set_tso(struct net_device *dev, u32 data)
if (netif_running(dev)) {
restart = 1;
- adapter->pool_config = 1;
ibmveth_close(dev);
- adapter->pool_config = 0;
}
set_attr = 0;
@@ -1535,9 +1530,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
only the buffer pools necessary to hold the new MTU */
if (netif_running(adapter->netdev)) {
need_restart = 1;
- adapter->pool_config = 1;
ibmveth_close(adapter->netdev);
- adapter->pool_config = 0;
}
/* Look for an active buffer pool that can hold the new MTU */
@@ -1701,7 +1694,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
- adapter->pool_config = 0;
ibmveth_init_link_settings(netdev);
netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
@@ -1842,9 +1834,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
return -ENOMEM;
}
pool->active = 1;
- adapter->pool_config = 1;
ibmveth_close(netdev);
- adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
} else {
@@ -1870,10 +1860,8 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
}
if (netif_running(netdev)) {
- adapter->pool_config = 1;
ibmveth_close(netdev);
pool->active = 0;
- adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
}
@@ -1884,9 +1872,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
return -EINVAL;
} else {
if (netif_running(netdev)) {
- adapter->pool_config = 1;
ibmveth_close(netdev);
- adapter->pool_config = 0;
pool->size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
@@ -1899,9 +1885,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
return -EINVAL;
} else {
if (netif_running(netdev)) {
- adapter->pool_config = 1;
ibmveth_close(netdev);
- adapter->pool_config = 0;
pool->buff_size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 115d4c45aa77..8468e2c59d7a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -147,7 +147,6 @@ struct ibmveth_adapter {
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
struct ibmveth_rx_q rx_queue;
- int pool_config;
int rx_csum;
int large_send;
bool is_active_trunk;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9282381a438f..e19a6bb3f444 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -68,6 +68,7 @@
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>
+#include <linux/cpu.h>
#include "ibmvnic.h"
@@ -171,6 +172,193 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
return ibmvnic_send_crq(adapter, &crq);
}
+static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *queue)
+{
+ if (!(queue && queue->irq))
+ return;
+
+ cpumask_clear(queue->affinity_mask);
+
+ if (irq_set_affinity_and_hint(queue->irq, NULL))
+ netdev_warn(adapter->netdev,
+ "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
+ __func__, queue, queue->irq);
+}
+
+static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_sub_crq_queue **rxqs;
+ struct ibmvnic_sub_crq_queue **txqs;
+ int num_rxqs, num_txqs;
+ int rc, i;
+
+ rc = 0;
+ rxqs = adapter->rx_scrq;
+ txqs = adapter->tx_scrq;
+ num_txqs = adapter->num_active_tx_scrqs;
+ num_rxqs = adapter->num_active_rx_scrqs;
+
+ netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
+ if (txqs) {
+ for (i = 0; i < num_txqs; i++)
+ ibmvnic_clean_queue_affinity(adapter, txqs[i]);
+ }
+ if (rxqs) {
+ for (i = 0; i < num_rxqs; i++)
+ ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
+ }
+}
+
+static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
+ unsigned int *cpu, int *stragglers,
+ int stride)
+{
+ cpumask_var_t mask;
+ int i;
+ int rc = 0;
+
+ if (!(queue && queue->irq))
+ return rc;
+
+ /* cpumask_var_t is either a pointer or array, allocation works here */
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* while we have extra cpu give one extra to this irq */
+ if (*stragglers) {
+ stride++;
+ (*stragglers)--;
+ }
+ /* atomic write is safer than writing bit by bit directly */
+ for (i = 0; i < stride; i++) {
+ cpumask_set_cpu(*cpu, mask);
+ *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
+ nr_cpu_ids, false);
+ }
+ /* set queue affinity mask */
+ cpumask_copy(queue->affinity_mask, mask);
+ rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
+ free_cpumask_var(mask);
+
+ return rc;
+}
+
+/* assumes cpu read lock is held */
+static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
+ struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
+ struct ibmvnic_sub_crq_queue *queue;
+ int num_rxqs = adapter->num_active_rx_scrqs;
+ int num_txqs = adapter->num_active_tx_scrqs;
+ int total_queues, stride, stragglers, i;
+ unsigned int num_cpu, cpu;
+ int rc = 0;
+
+ netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
+ if (!(adapter->rx_scrq && adapter->tx_scrq)) {
+ netdev_warn(adapter->netdev,
+ "%s: Set affinity failed, queues not allocated\n",
+ __func__);
+ return;
+ }
+
+ total_queues = num_rxqs + num_txqs;
+ num_cpu = num_online_cpus();
+ /* number of cpu's assigned per irq */
+ stride = max_t(int, num_cpu / total_queues, 1);
+ /* number of leftover cpu's */
+ stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
+ /* next available cpu to assign irq to */
+ cpu = cpumask_next(-1, cpu_online_mask);
+
+ for (i = 0; i < num_txqs; i++) {
+ queue = txqs[i];
+ rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
+ stride);
+ if (rc)
+ goto out;
+
+ if (!queue)
+ continue;
+
+ rc = __netif_set_xps_queue(adapter->netdev,
+ cpumask_bits(queue->affinity_mask),
+ i, XPS_CPUS);
+ if (rc)
+ netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
+ __func__, i, rc);
+ }
+
+ for (i = 0; i < num_rxqs; i++) {
+ queue = rxqs[i];
+ rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
+ stride);
+ if (rc)
+ goto out;
+ }
+
+out:
+ if (rc) {
+ netdev_warn(adapter->netdev,
+ "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
+ __func__, queue, queue->irq, rc);
+ ibmvnic_clean_affinity(adapter);
+ }
+}
+
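ibmvnic_set_affinity() above spreads the online CPUs across all tx and rx queue interrupts: every queue gets num_cpu / total_queues CPUs, the first num_cpu % total_queues queues pick up one straggler each, and the walk wraps around the online mask. A userspace model of that split, with example counts standing in for the real topology:

#include <stdio.h>

int main(void)
{
	int num_cpu = 10, num_queues = 4;	/* assumed example counts */
	int stride = num_cpu / num_queues;
	int stragglers = num_cpu >= num_queues ? num_cpu % num_queues : 0;
	int cpu = 0;

	if (stride < 1)
		stride = 1;

	for (int q = 0; q < num_queues; q++) {
		int take = stride + (stragglers > 0 ? 1 : 0);

		if (stragglers > 0)
			stragglers--;	/* early queues absorb the leftovers */
		printf("queue %d -> cpus", q);
		for (int i = 0; i < take; i++)
			printf(" %d", (cpu++) % num_cpu);	/* wrap like cpumask_next_wrap() */
		printf("\n");
	}
	return 0;
}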
+static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct ibmvnic_adapter *adapter;
+
+ adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
+ ibmvnic_set_affinity(adapter);
+ return 0;
+}
+
+static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct ibmvnic_adapter *adapter;
+
+ adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
+ ibmvnic_set_affinity(adapter);
+ return 0;
+}
+
+static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
+{
+ struct ibmvnic_adapter *adapter;
+
+ adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
+ ibmvnic_clean_affinity(adapter);
+ return 0;
+}
+
+static enum cpuhp_state ibmvnic_online;
+
+static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
+{
+ int ret;
+
+ ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
+ if (ret)
+ return ret;
+ ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
+ &adapter->node_dead);
+ if (!ret)
+ return ret;
+ cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
+ return ret;
+}
+
+static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
+{
+ cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
+ &adapter->node_dead);
+}
+
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
unsigned long length, unsigned long *number,
unsigned long *irq)
@@ -3626,6 +3814,8 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
if (!adapter->tx_scrq || !adapter->rx_scrq)
return -EINVAL;
+ ibmvnic_clean_affinity(adapter);
+
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@@ -3675,6 +3865,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
+ free_cpumask_var(scrq->affinity_mask);
kfree(scrq);
}
@@ -3695,6 +3886,8 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
goto zero_page_failed;
}
+ if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
+ goto cpumask_alloc_failed;
scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -3747,6 +3940,8 @@ reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
map_failed:
+ free_cpumask_var(scrq->affinity_mask);
+cpumask_alloc_failed:
free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
kfree(scrq);
@@ -3758,6 +3953,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
int i;
+ ibmvnic_clean_affinity(adapter);
if (adapter->tx_scrq) {
for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
if (!adapter->tx_scrq[i])
@@ -4035,6 +4231,11 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
goto req_rx_irq_failed;
}
}
+
+ cpus_read_lock();
+ ibmvnic_set_affinity(adapter);
+ cpus_read_unlock();
+
return rc;
req_rx_irq_failed:
@@ -6152,10 +6353,19 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
dev_info(&dev->dev, "ibmvnic registered\n");
+ rc = ibmvnic_cpu_notif_add(adapter);
+ if (rc) {
+ netdev_err(netdev, "Registering cpu notifier failed\n");
+ goto cpu_notif_add_failed;
+ }
+
complete(&adapter->probe_done);
return 0;
+cpu_notif_add_failed:
+ unregister_netdev(netdev);
+
ibmvnic_register_fail:
device_remove_file(&dev->dev, &dev_attr_failover);
@@ -6206,6 +6416,8 @@ static void ibmvnic_remove(struct vio_dev *dev)
spin_unlock_irqrestore(&adapter->state_lock, flags);
+ ibmvnic_cpu_notif_remove(adapter);
+
flush_work(&adapter->ibmvnic_reset);
flush_delayed_work(&adapter->ibmvnic_delayed_reset);
@@ -6336,15 +6548,40 @@ static struct vio_driver ibmvnic_driver = {
/* module functions */
static int __init ibmvnic_module_init(void)
{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
+ ibmvnic_cpu_online,
+ ibmvnic_cpu_down_prep);
+ if (ret < 0)
+ goto out;
+ ibmvnic_online = ret;
+ ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
+ NULL, ibmvnic_cpu_dead);
+ if (ret)
+ goto err_dead;
+
+ ret = vio_register_driver(&ibmvnic_driver);
+ if (ret)
+ goto err_vio_register;
+
pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
IBMVNIC_DRIVER_VERSION);
- return vio_register_driver(&ibmvnic_driver);
+ return 0;
+err_vio_register:
+ cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
+err_dead:
+ cpuhp_remove_multi_state(ibmvnic_online);
+out:
+ return ret;
}
static void __exit ibmvnic_module_exit(void)
{
vio_unregister_driver(&ibmvnic_driver);
+ cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
+ cpuhp_remove_multi_state(ibmvnic_online);
}
module_init(ibmvnic_module_init);
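
The ibmvnic hunks above allocate a dynamic CPU hotplug state and register one hlist instance per adapter so that queue affinity can be re-spread as CPUs come and go. Below is a minimal kernel-side sketch of that cpuhp multi-instance pattern; everything prefixed with example_ is illustrative and not part of the patch.

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state example_online;     /* dynamically allocated state */

struct example_adapter {
    struct hlist_node node;                 /* one cpuhp instance per adapter */
};

static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
    /* e.g. re-spread queue IRQ affinity now that @cpu is online */
    return 0;
}

static int example_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
    /* e.g. move work off @cpu before it goes offline */
    return 0;
}

/* called from probe()/remove() for each adapter instance */
static int example_notif_add(struct example_adapter *adapter)
{
    return cpuhp_state_add_instance_nocalls(example_online, &adapter->node);
}

static void example_notif_remove(struct example_adapter *adapter)
{
    cpuhp_state_remove_instance_nocalls(example_online, &adapter->node);
}

static int __init example_init(void)
{
    int ret;

    /* with CPUHP_AP_ONLINE_DYN the allocated state is returned on success */
    ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/example:online",
                                  example_cpu_online, example_cpu_down_prep);
    if (ret < 0)
        return ret;
    example_online = ret;
    return 0;
}

static void __exit example_exit(void)
{
    cpuhp_remove_multi_state(example_online);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
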
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index e5c6ff3d0c47..b35c9b6f913b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -825,6 +825,7 @@ struct ibmvnic_sub_crq_queue {
atomic_t used;
char name[32];
u64 handle;
+ cpumask_var_t affinity_mask;
} ____cacheline_aligned;
struct ibmvnic_long_term_buff {
@@ -983,6 +984,10 @@ struct ibmvnic_adapter {
int reset_done_rc;
bool wait_for_reset;
+ /* CPU hotplug instances for online & dead */
+ struct hlist_node node;
+ struct hlist_node node_dead;
+
/* partner capabilities */
u64 min_tx_queues;
u64 min_rx_queues;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 61e60e4de600..da6e303ad99b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4229,8 +4229,6 @@ process_skb:
*/
p = buffer_info->rxbuf.page;
if (length <= copybreak) {
- u8 *vaddr;
-
if (likely(!(netdev->features & NETIF_F_RXFCS)))
length -= 4;
skb = e1000_alloc_rx_skb(adapter,
@@ -4238,10 +4236,9 @@ process_skb:
if (!skb)
break;
- vaddr = kmap_atomic(p);
- memcpy(skb_tail_pointer(skb), vaddr,
- length);
- kunmap_atomic(vaddr);
+ memcpy(skb_tail_pointer(skb),
+ page_address(p), length);
+
/* re-use the page, so don't erase
* buffer_info->rxbuf.page
*/
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 44e58b6e7660..0baa15503c38 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -5,6 +5,9 @@
# Makefile for the Intel(R) PRO/1000 ethernet driver
#
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index e8a9a9610ac6..a187582d2299 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -116,7 +116,8 @@ enum e1000_boards {
board_pch_spt,
board_pch_cnp,
board_pch_tgp,
- board_pch_adp
+ board_pch_adp,
+ board_pch_mtp
};
struct e1000_ps_page {
@@ -504,6 +505,7 @@ extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_pch_adp_info;
+extern const struct e1000_info e1000_pch_mtp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000e_trace.h b/drivers/net/ethernet/intel/e1000e/e1000e_trace.h
new file mode 100644
index 000000000000..19d3cf4d924e
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/e1000e_trace.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022, Intel Corporation. */
+/* Modeled on trace-events-sample.h */
+/* The trace subsystem name for e1000e will be "e1000e_trace".
+ *
+ * This file is named e1000e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM e1000e_trace
+
+#if !defined(_TRACE_E1000E_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_E1000E_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(e1000e_trace_mac_register,
+ TP_PROTO(uint32_t reg),
+ TP_ARGS(reg),
+ TP_STRUCT__entry(__field(uint32_t, reg)),
+ TP_fast_assign(__entry->reg = reg;),
+ TP_printk("event: TraceHub e1000e mac register: 0x%08x",
+ __entry->reg)
+);
+
+#endif
+/* This must be outside ifdef _E1000E_TRACE_H */
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE e1000e_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 51a5afe9df2f..59e82d131d88 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -908,6 +908,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
mask |= BIT(18);
break;
default:
@@ -1575,6 +1576,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index bcf680e83811..29f9fae35f42 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -114,6 +114,14 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
#define E1000_DEV_ID_PCH_LNP_I219_V21 0x5511
+#define E1000_DEV_ID_PCH_ARL_I219_LM24 0x57A0
+#define E1000_DEV_ID_PCH_ARL_I219_V24 0x57A1
+#define E1000_DEV_ID_PCH_PTP_I219_LM25 0x57B3
+#define E1000_DEV_ID_PCH_PTP_I219_V25 0x57B4
+#define E1000_DEV_ID_PCH_PTP_I219_LM26 0x57B5
+#define E1000_DEV_ID_PCH_PTP_I219_V26 0x57B6
+#define E1000_DEV_ID_PCH_PTP_I219_LM27 0x57B7
+#define E1000_DEV_ID_PCH_PTP_I219_V27 0x57B8
#define E1000_REVISION_4 4
@@ -141,6 +149,7 @@ enum e1000_mac_type {
e1000_pch_adp,
e1000_pch_mtp,
e1000_pch_lnp,
+ e1000_pch_ptp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9466f65a6da7..0c7fd10312c8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -322,6 +322,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -468,6 +469,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -714,6 +716,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1681,6 +1684,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2137,6 +2141,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3182,6 +3187,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4122,6 +4128,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -6041,3 +6048,23 @@ const struct e1000_info e1000_pch_adp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_mtp_info = {
+ .mac = e1000_pch_mtp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 55cf2f62bb30..04acd1a992fa 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -28,6 +28,8 @@
#include <linux/suspend.h>
#include "e1000.h"
+#define CREATE_TRACE_POINTS
+#include "e1000e_trace.h"
char e1000e_driver_name[] = "e1000e";
@@ -53,6 +55,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
[board_pch_adp] = &e1000_pch_adp_info,
+ [board_pch_mtp] = &e1000_pch_mtp_info,
};
struct e1000_reg_info {
@@ -1388,26 +1391,18 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
/* page alloc/put takes too long and affects small
* packet throughput, so unsplit small packets and
- * save the alloc/put only valid in softirq (napi)
- * context to call kmap_*
+ * save the alloc/put
*/
if (l1 && (l1 <= copybreak) &&
((length + l1) <= adapter->rx_ps_bsize0)) {
- u8 *vaddr;
-
ps_page = &buffer_info->ps_pages[0];
- /* there is no documentation about how to call
- * kmap_atomic, so we can't hold the mapping
- * very long
- */
dma_sync_single_for_cpu(&pdev->dev,
ps_page->dma,
PAGE_SIZE,
DMA_FROM_DEVICE);
- vaddr = kmap_atomic(ps_page->page);
- memcpy(skb_tail_pointer(skb), vaddr, l1);
- kunmap_atomic(vaddr);
+ memcpy(skb_tail_pointer(skb),
+ page_address(ps_page->page), l1);
dma_sync_single_for_device(&pdev->dev,
ps_page->dma,
PAGE_SIZE,
@@ -1607,11 +1602,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
*/
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
- u8 *vaddr;
- vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr,
+ memcpy(skb_tail_pointer(skb),
+ page_address(buffer_info->page),
length);
- kunmap_atomic(vaddr);
/* re-use the page, so don't erase
* buffer_info->page
*/
@@ -3552,6 +3545,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -4067,6 +4061,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -6348,6 +6343,7 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
mac_data &= ~E1000_H2ME_EXIT_DPG;
+ trace_e1000e_trace_mac_register(mac_data);
ew32(H2ME, mac_data);
} else {
/* Request driver configure the device to S0ix */
@@ -6502,6 +6498,7 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
mac_data |= E1000_H2ME_EXIT_DPG;
+ trace_e1000e_trace_mac_register(mac_data);
ew32(H2ME, mac_data);
/* Poll up to 2.5 seconds for ME to unconfigure DPG.
@@ -7905,14 +7902,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_LM24), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_V24), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM25), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V25), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM26), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V26), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_mtp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 0e488e4fa5c1..def4566a916f 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -29,17 +29,11 @@ static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
struct e1000_hw *hw = &adapter->hw;
- bool neg_adj = false;
unsigned long flags;
- u64 adjustment;
- u32 timinca, incvalue;
+ u64 incvalue;
+ u32 timinca;
s32 ret_val;
- if (delta < 0) {
- neg_adj = true;
- delta = -delta;
- }
-
/* Get the System Time Register SYSTIM base frequency */
ret_val = e1000e_get_base_timinca(adapter, &timinca);
if (ret_val)
@@ -48,11 +42,7 @@ static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
spin_lock_irqsave(&adapter->systim_lock, flags);
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
-
- adjustment = mul_u64_u64_div_u64(incvalue, (u64)delta,
- 1000000ULL << 16);
-
- incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
+ incvalue = adjust_by_scaled_ppm(incvalue, delta);
timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
timinca |= incvalue;
@@ -297,6 +287,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
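
The adjfine conversions above (and the i40e one later in this series) drop the open-coded neg_adj/mul_u64_u64_div_u64() math in favour of adjust_by_scaled_ppm() from linux/ptp_clock_kernel.h. The user-space sketch below reproduces the same arithmetic for illustration only: the TIMINCA increment is a made-up placeholder, and the in-kernel helper uses mul_u64_u64_div_u64() rather than __int128.

#include <stdint.h>
#include <stdio.h>

/* scaled_ppm is parts-per-million in 16.16 fixed point, so 1 ppm == 1 << 16 */
static uint64_t adjust_by_scaled_ppm_sketch(uint64_t base, long scaled_ppm)
{
    int negative = scaled_ppm < 0;
    uint64_t abs_ppm = negative ? -(uint64_t)scaled_ppm : (uint64_t)scaled_ppm;
    /* 128-bit intermediate mirrors mul_u64_u64_div_u64() */
    uint64_t diff = (uint64_t)(((unsigned __int128)base * abs_ppm) /
                               (1000000ULL << 16));

    return negative ? base - diff : base + diff;
}

int main(void)
{
    uint64_t incvalue = 0x800000;   /* hypothetical TIMINCA increment */
    long scaled_ppm = 10L << 16;    /* request +10 ppm */

    printf("adjusted incvalue: %llu\n",
           (unsigned long long)adjust_by_scaled_ppm_sketch(incvalue, scaled_ppm));
    return 0;
}
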
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2cca9e84e31e..34ab5ff9823b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1229,10 +1229,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
continue;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
@@ -1245,10 +1245,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
continue;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
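
The _irq-suffix removals above (and in the i40e/iavf hunks that follow) keep the same seqcount-style retry loop; only the begin/retry helpers change. A minimal kernel-side sketch of the reader/writer pattern, with example_ names that are illustrative and not taken from any of these drivers:

#include <linux/u64_stats_sync.h>

struct example_ring {
    u64 packets;
    u64 bytes;
    struct u64_stats_sync syncp;    /* u64_stats_init(&syncp) before first use */
};

/* writer side, e.g. from the NAPI poll loop */
static void example_update_stats(struct example_ring *ring, u64 pkts, u64 bytes)
{
    u64_stats_update_begin(&ring->syncp);
    ring->packets += pkts;
    ring->bytes += bytes;
    u64_stats_update_end(&ring->syncp);
}

/* reader side, e.g. from .ndo_get_stats64; retries if a writer raced (32-bit) */
static void example_read_stats(struct example_ring *ring, u64 *pkts, u64 *bytes)
{
    unsigned int start;

    do {
        start = u64_stats_fetch_begin(&ring->syncp);
        *pkts = ring->packets;
        *bytes = ring->bytes;
    } while (u64_stats_fetch_retry(&ring->syncp, start));
}
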
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9a60d6b207f7..60e351665c70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -992,6 +992,7 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+ int irq_num; /* IRQ assigned to this q_vector */
} ____cacheline_internodealigned_in_smp;
/* lan device */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f6fa63e4253c..887a735fe2a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -154,7 +154,7 @@ __i40e_add_ethtool_stats(u64 **data, void *pointer,
* @ring: the ring to copy
*
* Queue statistics must be copied while protected by
- * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
+ * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats.
* Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
* ring pointer is null, zero out the queue stat values and update the data
* pointer. Otherwise safely copy the stats from the ring into the supplied
@@ -172,16 +172,16 @@ i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
/* To avoid invalid statistics values, ensure that we keep retrying
* the copy until we get a consistent value according to
- * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * u64_stats_fetch_retry. But first, make sure our ring is
* non-null before attempting to access its syncp.
*/
do {
- start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
for (i = 0; i < size; i++) {
i40e_add_one_ethtool_stat(&(*data)[i], ring,
&stats[i]);
}
- } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
@@ -1287,8 +1287,10 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
* trying to set something that we do not support.
*/
if (memcmp(&copy_ks.base, &safe_ks.base,
- sizeof(struct ethtool_link_settings)))
+ sizeof(struct ethtool_link_settings))) {
+ netdev_err(netdev, "Only speed and autoneg are supported.\n");
return -EOPNOTSUPP;
+ }
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
timeout--;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6416322d7c18..95485b56d6c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -419,10 +419,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
@@ -472,10 +472,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
if (!ring)
continue;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
@@ -897,10 +897,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
@@ -915,10 +915,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
@@ -935,10 +935,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
@@ -4123,6 +4123,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
}
/* register for affinity change notifications */
+ q_vector->irq_num = irq_num;
q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
q_vector->affinity_notify.release = i40e_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ffea0c9c82f1..c37abbb3cd06 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -347,23 +347,12 @@ static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct i40e_hw *hw = &pf->hw;
- u64 adj, freq, diff;
- int neg_adj = 0;
-
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
+ u64 adj, base_adj;
smp_mb(); /* Force any pending update before accessing. */
- freq = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
- diff = mul_u64_u64_div_u64(freq, (u64)scaled_ppm,
- 1000000ULL << 16);
+ base_adj = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
- if (neg_adj)
- adj = I40E_PTP_40GB_INCVAL - diff;
- else
- adj = I40E_PTP_40GB_INCVAL + diff;
+ adj = adjust_by_scaled_ppm(base_adj, scaled_ppm);
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index b5b12299931f..79d587ad5409 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -55,6 +55,55 @@
* being built from shared code.
*/
+#define NO_DEV "(i40e no_device)"
+
+TRACE_EVENT(i40e_napi_poll,
+
+ TP_PROTO(struct napi_struct *napi, struct i40e_q_vector *q, int budget,
+ int budget_per_ring, unsigned int rx_cleaned, unsigned int tx_cleaned,
+ bool rx_clean_complete, bool tx_clean_complete),
+
+ TP_ARGS(napi, q, budget, budget_per_ring, rx_cleaned, tx_cleaned,
+ rx_clean_complete, tx_clean_complete),
+
+ TP_STRUCT__entry(
+ __field(int, budget)
+ __field(int, budget_per_ring)
+ __field(unsigned int, rx_cleaned)
+ __field(unsigned int, tx_cleaned)
+ __field(int, rx_clean_complete)
+ __field(int, tx_clean_complete)
+ __field(int, irq_num)
+ __field(int, curr_cpu)
+ __string(qname, q->name)
+ __string(dev_name, napi->dev ? napi->dev->name : NO_DEV)
+ __bitmask(irq_affinity, nr_cpumask_bits)
+ ),
+
+ TP_fast_assign(
+ __entry->budget = budget;
+ __entry->budget_per_ring = budget_per_ring;
+ __entry->rx_cleaned = rx_cleaned;
+ __entry->tx_cleaned = tx_cleaned;
+ __entry->rx_clean_complete = rx_clean_complete;
+ __entry->tx_clean_complete = tx_clean_complete;
+ __entry->irq_num = q->irq_num;
+ __entry->curr_cpu = get_cpu();
+ __assign_str(qname, q->name);
+ __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+ __assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
+ nr_cpumask_bits);
+ ),
+
+ TP_printk("i40e_napi_poll on dev %s q %s irq %d irq_mask %s curr_cpu %d "
+ "budget %d bpr %d rx_cleaned %u tx_cleaned %u "
+ "rx_clean_complete %d tx_clean_complete %d",
+ __get_str(dev_name), __get_str(qname), __entry->irq_num,
+ __get_bitmask(irq_affinity), __entry->curr_cpu, __entry->budget,
+ __entry->budget_per_ring, __entry->rx_cleaned, __entry->tx_cleaned,
+ __entry->rx_clean_complete, __entry->tx_clean_complete)
+);
+
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(
i40e_tx_template,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b97c95f89fa0..924f972b91fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -923,11 +923,13 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi)
* @vsi: the VSI we care about
* @tx_ring: Tx ring to clean
* @napi_budget: Used to determine if we are in netpoll
+ * @tx_cleaned: Out parameter set to the number of TXes cleaned
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
- struct i40e_ring *tx_ring, int napi_budget)
+ struct i40e_ring *tx_ring, int napi_budget,
+ unsigned int *tx_cleaned)
{
int i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
@@ -1048,6 +1050,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
}
}
+ *tx_cleaned = total_packets;
return !!budget;
}
@@ -2422,6 +2425,7 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
+ * @rx_cleaned: Out parameter of the number of packets processed
*
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
@@ -2430,7 +2434,8 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
*
* Returns amount of work completed
**/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+ unsigned int *rx_cleaned)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
@@ -2567,6 +2572,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+ *rx_cleaned = total_rx_packets;
+
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_packets;
}
@@ -2689,6 +2696,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
+ bool tx_clean_complete = true;
+ bool rx_clean_complete = true;
+ unsigned int tx_cleaned = 0;
+ unsigned int rx_cleaned = 0;
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
@@ -2705,10 +2716,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->tx) {
bool wd = ring->xsk_pool ?
i40e_clean_xdp_tx_irq(vsi, ring) :
- i40e_clean_tx_irq(vsi, ring, budget);
+ i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);
if (!wd) {
- clean_complete = false;
+ clean_complete = tx_clean_complete = false;
continue;
}
arm_wb |= ring->arm_wb;
@@ -2733,14 +2744,18 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->rx) {
int cleaned = ring->xsk_pool ?
i40e_clean_rx_irq_zc(ring, budget_per_ring) :
- i40e_clean_rx_irq(ring, budget_per_ring);
+ i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
- clean_complete = false;
+ clean_complete = rx_clean_complete = false;
}
+ if (!i40e_enabled_xdp_vsi(vsi))
+ trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
+ tx_cleaned, rx_clean_complete, tx_clean_complete);
+
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
int cpu_id = smp_processor_id();
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index a056e1545615..d79ead5e8d0c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -147,7 +147,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
* @ring: the ring to copy
*
* Queue statistics must be copied while protected by
- * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats.
+ * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
* Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
* ring pointer is null, zero out the queue stat values and update the data
* pointer. Otherwise safely copy the stats from the ring into the supplied
@@ -165,14 +165,14 @@ iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
/* To avoid invalid statistics values, ensure that we keep retrying
* the copy until we get a consistent value according to
- * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * u64_stats_fetch_retry. But first, make sure our ring is
* non-null before attempting to access its syncp.
*/
do {
- start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
for (i = 0; i < size; i++)
iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
- } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index f71e132ede09..c4e451ef7942 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4827,7 +4827,7 @@ static void iavf_shutdown(struct pci_dev *pdev)
iavf_close(netdev);
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
+ dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
/* Prevent the watchdog from running. */
iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
@@ -5088,7 +5088,7 @@ static void iavf_remove(struct pci_dev *pdev)
}
mutex_lock(&adapter->crit_lock);
- dev_info(&adapter->pdev->dev, "Remove device\n");
+ dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
iavf_request_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 001500afc4a6..2f0b604abc5e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -137,6 +137,21 @@
*/
#define ICE_BW_KBPS_DIVISOR 125
+/* Default recipes have priority 4 and below, hence priority values between 5..7
+ * can be used as filter priority for advanced switch filter (advanced switch
+ * filters need new recipe to be created for specified extraction sequence
+ * because default recipe extraction sequence does not represent custom
+ * extraction)
+ */
+#define ICE_SWITCH_FLTR_PRIO_QUEUE 7
+/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
+ * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
+ * SYN/FIN/RST))
+ */
+#define ICE_SWITCH_FLTR_PRIO_RSVD 6
+#define ICE_SWITCH_FLTR_PRIO_VSI 5
+#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI
+
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
@@ -305,6 +320,11 @@ enum ice_vsi_state {
ICE_VSI_STATE_NBITS /* must be last */
};
+struct ice_vsi_stats {
+ struct ice_ring_stats **tx_ring_stats; /* Tx ring stats array */
+ struct ice_ring_stats **rx_ring_stats; /* Rx ring stats array */
+};
+
/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
struct net_device *netdev;
@@ -358,6 +378,7 @@ struct ice_vsi {
/* VSI stats */
struct rtnl_link_stats64 net_stats;
+ struct rtnl_link_stats64 net_stats_prev;
struct ice_eth_stats eth_stats;
struct ice_eth_stats eth_stats_prev;
@@ -525,6 +546,7 @@ struct ice_pf {
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
struct ice_vsi **vsi; /* VSIs created by the driver */
+ struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct ice_vfs vfs;
@@ -594,6 +616,8 @@ struct ice_pf {
u16 num_dmac_chnl_fltrs;
struct hlist_head tc_flower_fltr_list;
+ u64 supported_rxdids;
+
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1bdc70aa979d..958c1e435232 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -848,9 +848,9 @@ struct ice_aqc_txsched_elem {
u8 generic;
#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1
#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1
-#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
+#define ICE_AQC_ELEM_GENERIC_PRIO_M GENMASK(3, 1)
#define ICE_AQC_ELEM_GENERIC_SP_S 0x4
-#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
+#define ICE_AQC_ELEM_GENERIC_SP_M GENMASK(4, 4)
#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5
#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \
(0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
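
The GENMASK() conversions above are value-preserving: GENMASK(3, 1) is the same 0xE mask as 0x7 << 1, and GENMASK(4, 4) is BIT(4). A small user-space check, using a local macro that mirrors the kernel's GENMASK() definition (assumed here purely for illustration):

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define GENMASK_LOCAL(h, l) \
    (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
    /* ICE_AQC_ELEM_GENERIC_PRIO_M: bits 3..1, previously 0x7 << 1 */
    assert(GENMASK_LOCAL(3, 1) == (0x7UL << 1));
    /* ICE_AQC_ELEM_GENERIC_SP_M: bit 4, previously 0x1 << 4 */
    assert(GENMASK_LOCAL(4, 4) == (0x1UL << 4));
    printf("PRIO_M=0x%lx SP_M=0x%lx\n",
           GENMASK_LOCAL(3, 1), GENMASK_LOCAL(4, 4));
    return 0;
}
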
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index e864634d66bc..554095b25f44 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -389,7 +389,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
* Indicates the starting address of the descriptor queue defined in
* 128 Byte units.
*/
- rlan_ctx.base = ring->dma >> 7;
+ rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
rlan_ctx.qlen = ring->count;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 039342a0ed15..d02b55b6aa9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1105,6 +1105,9 @@ int ice_init_hw(struct ice_hw *hw)
hw->evb_veb = true;
+ /* init xarray for identifying scheduling nodes uniquely */
+ xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);
+
/* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
@@ -2945,8 +2948,8 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
- * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
+ * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
@@ -4600,7 +4603,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
q_ctx->q_teid = le32_to_cpu(node.node_teid);
/* add a leaf node into scheduler tree queue layer */
- status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
if (!status)
status = ice_sched_replay_q_bw(pi, q_ctx);
@@ -4835,7 +4838,7 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
for (i = 0; i < num_qsets; i++) {
node.node_teid = buf->rdma_qsets[i].qset_teid;
ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
- &node);
+ &node, NULL);
if (ret)
break;
qset_teid[i] = le32_to_cpu(node.node_teid);
@@ -5512,3 +5515,40 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
ICE_FW_API_REPORT_DFLT_CFG_MIN,
ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
+
+/* each of the indexes into the following array match the speed of a return
+ * value from the list of AQ returned speeds like the range:
+ * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding
+ * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this
+ * array. The array is defined as 15 elements long because the link_speed
+ * returned by the firmware is a 16 bit value, but is indexed
+ * by [fls(speed) - 1]
+ */
+static const u32 ice_aq_to_link_speed[15] = {
+ SPEED_10, /* BIT(0) */
+ SPEED_100,
+ SPEED_1000,
+ SPEED_2500,
+ SPEED_5000,
+ SPEED_10000,
+ SPEED_20000,
+ SPEED_25000,
+ SPEED_40000,
+ SPEED_50000,
+ SPEED_100000, /* BIT(10) */
+ 0,
+ 0,
+ 0,
+ 0 /* BIT(14) */
+};
+
+/**
+ * ice_get_link_speed - get integer speed from table
+ * @index: array index from fls(aq speed) - 1
+ *
+ * Returns: u32 value containing integer speed
+ */
+u32 ice_get_link_speed(u16 index)
+{
+ return ice_aq_to_link_speed[index];
+}
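
ice_get_link_speed() above is meant to be indexed with fls(aq_speed) - 1, where aq_speed is the single-bit link speed reported by firmware. The user-space sketch below shows that lookup; the BIT(7) == 25G assignment is an assumption taken from the BIT(0)..BIT(10) range described in the comment, purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Mb/s values corresponding to SPEED_10 .. SPEED_100000 in the table above */
static const uint32_t aq_to_link_speed[15] = {
    10, 100, 1000, 2500, 5000, 10000, 20000,
    25000, 40000, 50000, 100000, 0, 0, 0, 0
};

/* fls(): position of the most significant set bit, 1-based; 0 if no bit set */
static int fls_sketch(uint16_t x)
{
    int pos = 0;

    while (x) {
        pos++;
        x >>= 1;
    }
    return pos;
}

int main(void)
{
    uint16_t aq_link_speed = 1U << 7;   /* assumed: 25G bit reported by firmware */
    uint32_t mbps = 0;

    if (aq_link_speed)
        mbps = aq_to_link_speed[fls_sketch(aq_link_speed) - 1];

    printf("link speed: %u Mb/s\n", mbps);  /* prints 25000 */
    return 0;
}
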
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8b6712b92e84..4c6a0b5c9304 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -163,6 +163,7 @@ int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
+u32 ice_get_link_speed(u16 index);
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 0b146a0d4205..6be02f9b0b8c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1580,7 +1580,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
/* new TC */
status = ice_sched_query_elem(pi->hw, teid2, &elem);
if (!status)
- status = ice_sched_add_node(pi, 1, &elem);
+ status = ice_sched_add_node(pi, 1, &elem, NULL);
if (status)
break;
/* update the TC number */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index add90e75f05c..4f24d441c35e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -3,6 +3,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
+#include "ice_devlink.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
@@ -364,6 +365,12 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
+ if (pf->hw.port_info->is_custom_tx_enabled) {
+ dev_err(dev, "Custom Tx scheduler feature enabled, can't configure DCB\n");
+ return -EBUSY;
+ }
+ ice_tear_down_devlink_rate_tree(pf);
+
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
@@ -874,6 +881,9 @@ void ice_update_dcb_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
+ if (ice_is_reset_in_progress(pf->state))
+ pf->stat_prev_loaded = false;
+
for (i = 0; i < 8; i++) {
ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
pf->stat_prev_loaded,
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index e6ec20079ced..8286e47b4bae 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -8,6 +8,7 @@
#include "ice_devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
+#include "ice_dcb_lib.h"
static int ice_active_port_option = -1;
@@ -310,12 +311,6 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
}
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
- goto out_free_ctx;
- }
-
ice_info_get_dsn(pf, ctx);
err = devlink_info_serial_number_put(req, ctx->buf);
@@ -713,6 +708,490 @@ ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
return ice_devlink_port_split(devlink, port, 1, extack);
}
+/**
+ * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
+ * @pf: pf struct
+ *
+ * This function tears down the tree exported during VF creation.
+ */
+void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
+{
+ struct devlink *devlink;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ devlink = priv_to_devlink(pf);
+
+ devl_lock(devlink);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->devlink_port.devlink_rate)
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+
+ devl_rate_nodes_destroy(devlink);
+ devl_unlock(devlink);
+}
+
+/**
+ * ice_enable_custom_tx - try to enable custom Tx feature
+ * @pf: pf struct
+ *
+ * This function tries to enable the custom Tx feature;
+ * it cannot be enabled if DCB or ADQ is active.
+ */
+static bool ice_enable_custom_tx(struct ice_pf *pf)
+{
+ struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (pi->is_custom_tx_enabled)
+ /* already enabled, return true */
+ return true;
+
+ if (ice_is_adq_active(pf)) {
+ dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
+ return false;
+ }
+
+ if (ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
+ return false;
+ }
+
+ pi->is_custom_tx_enabled = true;
+
+ return true;
+}
+
+/**
+ * ice_traverse_tx_tree - traverse Tx scheduler tree
+ * @devlink: devlink struct
+ * @node: current node, used for recursion
+ * @tc_node: tc_node struct, that is treated as a root
+ * @pf: pf struct
+ *
+ * This function traverses the Tx scheduler tree and exports the
+ * entire structure to devlink-rate.
+ */
+static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
+ struct ice_sched_node *tc_node, struct ice_pf *pf)
+{
+ struct devlink_rate *rate_node = NULL;
+ struct ice_vf *vf;
+ int i;
+
+ if (node->parent == tc_node) {
+ /* create root node */
+ rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
+ } else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->vf) {
+ vf = pf->vsi[node->vsi_handle]->vf;
+ if (!vf->devlink_port.devlink_rate)
+ /* leaf nodes don't have children,
+ * so we don't set rate_node
+ */
+ devl_rate_leaf_create(&vf->devlink_port, node,
+ node->parent->rate_node);
+ } else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
+ node->parent->rate_node) {
+ rate_node = devl_rate_node_create(devlink, node, node->name,
+ node->parent->rate_node);
+ }
+
+ if (rate_node && !IS_ERR(rate_node))
+ node->rate_node = rate_node;
+
+ for (i = 0; i < node->num_children; i++)
+ ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
+}
+
+/**
+ * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
+ * @devlink: devlink struct
+ * @vsi: main vsi struct
+ *
+ * This function finds a root node, then calls ice_traverse_tx_tree(), which
+ * traverses the tree and exports its contents to devlink-rate.
+ */
+int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
+{
+ struct ice_port_info *pi = vsi->port_info;
+ struct ice_sched_node *tc_node;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ tc_node = pi->root->children[0];
+ mutex_lock(&pi->sched_lock);
+ devl_lock(devlink);
+ for (i = 0; i < tc_node->num_children; i++)
+ ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
+ devl_unlock(devlink);
+ mutex_unlock(&pi->sched_lock);
+
+ return 0;
+}
+
+/**
+ * ice_set_object_tx_share - sets node scheduling parameter
+ * @pi: port_info struct instance
+ * @node: node struct instance
+ * @bw: bandwidth in bytes per second
+ * @extack: extended netdev ack structure
+ *
+ * This function sets ICE_MIN_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
+ u64 bw, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ mutex_lock(&pi->sched_lock);
+ /* converts bytes per second to kilo bits per second */
+ node->tx_share = div_u64(bw, 125);
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");
+
+ return status;
+}
+
+/**
+ * ice_set_object_tx_max - sets node scheduling parameter
+ * @pi: port_info struct instance
+ * @node: node struct instance
+ * @bw: bandwidth in bytes per second
+ * @extack: extended netdev ack structure
+ *
+ * This function sets ICE_MAX_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
+ u64 bw, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ mutex_lock(&pi->sched_lock);
+ /* converts bytes per second value to kilo bits per second */
+ node->tx_max = div_u64(bw, 125);
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");
+
+ return status;
+}
+
+/**
+ * ice_set_object_tx_priority - sets node scheduling parameter
+ * @pi: port_info struct instance
+ * @node: node struct instance
+ * @priority: value representing priority for strict priority arbitration
+ * @extack: extended netdev ack structure
+ *
+ * This function sets priority of node among siblings.
+ */
+static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+ u32 priority, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ if (priority >= 8) {
+ NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
+ return -EINVAL;
+ }
+
+ mutex_lock(&pi->sched_lock);
+ node->tx_priority = priority;
+ status = ice_sched_set_node_priority(pi, node, node->tx_priority);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");
+
+ return status;
+}
+
+/**
+ * ice_set_object_tx_weight - sets node scheduling parameter
+ * @pi: port_info struct instance
+ * @node: node struct instance
+ * @weight: value representing relative weight for WFQ arbitration
+ * @extack: extended netdev ack structure
+ *
+ * This function sets node weight for WFQ algorithm.
+ */
+static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
+ u32 weight, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ if (weight > 200 || weight < 1) {
+ NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
+ return -EINVAL;
+ }
+
+ mutex_lock(&pi->sched_lock);
+ node->tx_weight = weight;
+ status = ice_sched_set_node_weight(pi, node, node->tx_weight);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");
+
+ return status;
+}
+
+/**
+ * ice_get_pi_from_dev_rate - get port info from devlink_rate
+ * @rate_node: devlink_rate struct instance
+ *
+ * This function returns the port_info struct corresponding to the devlink_rate node
+ */
+static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
+{
+ struct ice_pf *pf = devlink_priv(rate_node->devlink);
+
+ return ice_get_main_vsi(pf)->port_info;
+}
+
+static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node;
+ struct ice_port_info *pi;
+
+ pi = ice_get_pi_from_dev_rate(rate_node);
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ /* preallocate memory for ice_sched_node */
+ node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ *priv = node;
+
+ return 0;
+}
+
+static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node, *tc_node;
+ struct ice_port_info *pi;
+
+ pi = ice_get_pi_from_dev_rate(rate_node);
+ tc_node = pi->root->children[0];
+ node = priv;
+
+ if (!rate_node->parent || !node || tc_node == node || !extack)
+ return 0;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ /* can't delete a node with children */
+ if (node->num_children)
+ return -EINVAL;
+
+ mutex_lock(&pi->sched_lock);
+ ice_free_sched_node(pi, node);
+ mutex_unlock(&pi->sched_lock);
+
+ return 0;
+}
+
+static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
+ node, tx_max, extack);
+}
+
+static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
+ tx_share, extack);
+}
+
+static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
+ u32 tx_priority, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
+ tx_priority, extack);
+}
+
+static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
+ u32 tx_weight, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
+ tx_weight, extack);
+}
+
+static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_max, extack);
+}
+
+static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_share, extack);
+}
+
+static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
+ u32 tx_priority, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_priority, extack);
+}
+
+static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
+ u32 tx_weight, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_weight, extack);
+}
+
+static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
+ struct ice_sched_node *tc_node, *node, *parent_node;
+ u16 num_nodes_added;
+ u32 first_node_teid;
+ u32 node_teid;
+ int status;
+
+ tc_node = pi->root->children[0];
+ node = priv;
+
+ if (!extack)
+ return 0;
+
+ if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
+ return -EBUSY;
+
+ if (!parent) {
+ if (!node || tc_node == node || node->num_children)
+ return -EINVAL;
+
+ mutex_lock(&pi->sched_lock);
+ ice_free_sched_node(pi, node);
+ mutex_unlock(&pi->sched_lock);
+
+ return 0;
+ }
+
+ parent_node = parent_priv;
+
+ /* if the node doesn't exist, create it */
+ if (!node->parent) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_add_elems(pi, tc_node, parent_node,
+ parent_node->tx_sched_layer + 1,
+ 1, &num_nodes_added, &first_node_teid,
+ &node);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't add a new node");
+ return status;
+ }
+
+ if (devlink_rate->tx_share)
+ ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack);
+ if (devlink_rate->tx_max)
+ ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack);
+ if (devlink_rate->tx_priority)
+ ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack);
+ if (devlink_rate->tx_weight)
+ ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack);
+ } else {
+ node_teid = le32_to_cpu(node->info.node_teid);
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent");
+ }
+
+ return status;
+}
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
@@ -725,6 +1204,22 @@ static const struct devlink_ops ice_devlink_ops = {
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
+
+ .rate_node_new = ice_devlink_rate_node_new,
+ .rate_node_del = ice_devlink_rate_node_del,
+
+ .rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set,
+ .rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set,
+ .rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set,
+ .rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set,
+
+ .rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
+ .rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
+ .rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
+ .rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,
+
+ .rate_leaf_parent_set = ice_devlink_set_parent,
+ .rate_node_parent_set = ice_devlink_set_parent,
};
static int
@@ -1033,12 +1528,7 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
*/
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- struct devlink_port *devlink_port;
-
- devlink_port = &pf->devlink_port;
-
- devlink_port_type_clear(devlink_port);
- devlink_port_unregister(devlink_port);
+ devlink_port_unregister(&pf->devlink_port);
}
/**
@@ -1094,31 +1584,28 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
*/
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
- struct devlink_port *devlink_port;
-
- devlink_port = &vf->devlink_port;
-
- devlink_port_type_clear(devlink_port);
- devlink_port_unregister(devlink_port);
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ devlink_port_unregister(&vf->devlink_port);
}
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+static const struct devlink_region_ops ice_nvm_region_ops;
+static const struct devlink_region_ops ice_sram_region_ops;
+
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
* @devlink: the devlink instance
- * @ops: the devlink region being snapshotted
+ * @ops: the devlink region to snapshot
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
- * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
- * the nvm-flash devlink region. It captures a snapshot of the full NVM flash
- * contents, including both banks of flash. This snapshot can later be viewed
- * via the devlink-region interface.
+ * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
+ * the nvm-flash or shadow-ram region.
*
- * It captures the flash using the FLASH_ONLY bit set when reading via
- * firmware, so it does not read the current Shadow RAM contents. For that,
- * use the shadow-ram region.
+ * It captures a snapshot of the NVM or Shadow RAM flash contents. This
+ * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
+ * interface.
*
* @returns zero on success, and updates the data pointer. Returns a non-zero
* error code on failure.
@@ -1130,17 +1617,27 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ bool read_shadow_ram;
u8 *nvm_data, *tmp, i;
u32 nvm_size, left;
s8 num_blks;
int status;
- nvm_size = hw->flash.flash_size;
+ if (ops == &ice_nvm_region_ops) {
+ read_shadow_ram = false;
+ nvm_size = hw->flash.flash_size;
+ } else if (ops == &ice_sram_region_ops) {
+ read_shadow_ram = true;
+ nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
+ return -EOPNOTSUPP;
+ }
+
nvm_data = vzalloc(nvm_size);
if (!nvm_data)
return -ENOMEM;
-
num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
tmp = nvm_data;
left = nvm_size;
@@ -1164,7 +1661,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
}
status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
- &read_sz, tmp, false);
+ &read_sz, tmp, read_shadow_ram);
if (status) {
dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
read_sz, status, hw->adminq.sq_last_status);
@@ -1185,62 +1682,69 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
}
/**
- * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents
+ * ice_devlink_nvm_read - Read a portion of NVM flash contents
* @devlink: the devlink instance
- * @ops: the devlink region being snapshotted
+ * @ops: the devlink region being read
* @extack: extended ACK response structure
- * @data: on exit points to snapshot data buffer
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
*
- * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
- * the shadow-ram devlink region. It captures a snapshot of the shadow ram
- * contents. This snapshot can later be viewed via the devlink-region
- * interface.
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * It reads from either the nvm-flash or shadow-ram region contents.
*
* @returns zero on success, and updates the data pointer. Returns a non-zero
* error code on failure.
*/
-static int
-ice_devlink_sram_snapshot(struct devlink *devlink,
- const struct devlink_region_ops __always_unused *ops,
- struct netlink_ext_ack *extack, u8 **data)
+static int ice_devlink_nvm_read(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data)
{
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- u8 *sram_data;
- u32 sram_size;
- int err;
+ bool read_shadow_ram;
+ u64 nvm_size;
+ int status;
- sram_size = hw->flash.sr_words * 2u;
- sram_data = vzalloc(sram_size);
- if (!sram_data)
- return -ENOMEM;
+ if (ops == &ice_nvm_region_ops) {
+ read_shadow_ram = false;
+ nvm_size = hw->flash.flash_size;
+ } else if (ops == &ice_sram_region_ops) {
+ read_shadow_ram = true;
+ nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
+ return -EOPNOTSUPP;
+ }
- err = ice_acquire_nvm(hw, ICE_RES_READ);
- if (err) {
+ if (offset + size >= nvm_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
+ return -ERANGE;
+ }
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status) {
dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- err, hw->adminq.sq_last_status);
+ status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- vfree(sram_data);
- return err;
+ return -EIO;
}
- /* Read from the Shadow RAM, rather than directly from NVM */
- err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true);
- if (err) {
+ status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
+ read_shadow_ram);
+ if (status) {
dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
- sram_size, err, hw->adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to read Shadow RAM contents");
+ size, status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
ice_release_nvm(hw);
- vfree(sram_data);
- return err;
+ return -EIO;
}
-
ice_release_nvm(hw);
- *data = sram_data;
-
return 0;
}
@@ -1292,12 +1796,14 @@ static const struct devlink_region_ops ice_nvm_region_ops = {
.name = "nvm-flash",
.destructor = vfree,
.snapshot = ice_devlink_nvm_snapshot,
+ .read = ice_devlink_nvm_read,
};
static const struct devlink_region_ops ice_sram_region_ops = {
.name = "shadow-ram",
.destructor = vfree,
- .snapshot = ice_devlink_sram_snapshot,
+ .snapshot = ice_devlink_nvm_snapshot,
+ .read = ice_devlink_nvm_read,
};
static const struct devlink_region_ops ice_devcaps_region_ops = {
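For context, region ops like the two above are registered against the devlink instance elsewhere in the driver. A minimal sketch of that registration, assuming the generic devlink_region_create() API and the same size fields the callbacks use (the helper name below is illustrative, not the driver's ice_devlink_init_regions()):

static void demo_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct ice_hw *hw = &pf->hw;
	struct devlink_region *r;

	/* one snapshot slot each; sizes mirror the snapshot/read callbacks */
	r = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
				  hw->flash.flash_size);
	if (IS_ERR(r))
		dev_dbg(ice_pf_to_dev(pf), "nvm-flash region not created\n");

	r = devlink_region_create(devlink, &ice_sram_region_ops, 1,
				  hw->flash.sr_words * 2u);
	if (IS_ERR(r))
		dev_dbg(ice_pf_to_dev(pf), "shadow-ram region not created\n");
}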
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index fe006d9946f8..6ec96779f52e 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -18,4 +18,7 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
+int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi);
+void ice_tear_down_devlink_rate_tree(struct ice_pf *pf);
+
#endif /* _ICE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index b7be84bbe72d..4191994d8f3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -151,6 +151,175 @@ static const u32 ice_regs_dump_list[] = {
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
+#define GLDCB_TLPM_PCI_DM 0x000A0180
+ GLDCB_TLPM_PCI_DM,
+#define GLDCB_TLPM_TC2PFC 0x000A0194
+ GLDCB_TLPM_TC2PFC,
+#define TCDCB_TLPM_WAIT_DM(_i) (0x000A0080 + ((_i) * 4))
+ TCDCB_TLPM_WAIT_DM(0),
+ TCDCB_TLPM_WAIT_DM(1),
+ TCDCB_TLPM_WAIT_DM(2),
+ TCDCB_TLPM_WAIT_DM(3),
+ TCDCB_TLPM_WAIT_DM(4),
+ TCDCB_TLPM_WAIT_DM(5),
+ TCDCB_TLPM_WAIT_DM(6),
+ TCDCB_TLPM_WAIT_DM(7),
+ TCDCB_TLPM_WAIT_DM(8),
+ TCDCB_TLPM_WAIT_DM(9),
+ TCDCB_TLPM_WAIT_DM(10),
+ TCDCB_TLPM_WAIT_DM(11),
+ TCDCB_TLPM_WAIT_DM(12),
+ TCDCB_TLPM_WAIT_DM(13),
+ TCDCB_TLPM_WAIT_DM(14),
+ TCDCB_TLPM_WAIT_DM(15),
+ TCDCB_TLPM_WAIT_DM(16),
+ TCDCB_TLPM_WAIT_DM(17),
+ TCDCB_TLPM_WAIT_DM(18),
+ TCDCB_TLPM_WAIT_DM(19),
+ TCDCB_TLPM_WAIT_DM(20),
+ TCDCB_TLPM_WAIT_DM(21),
+ TCDCB_TLPM_WAIT_DM(22),
+ TCDCB_TLPM_WAIT_DM(23),
+ TCDCB_TLPM_WAIT_DM(24),
+ TCDCB_TLPM_WAIT_DM(25),
+ TCDCB_TLPM_WAIT_DM(26),
+ TCDCB_TLPM_WAIT_DM(27),
+ TCDCB_TLPM_WAIT_DM(28),
+ TCDCB_TLPM_WAIT_DM(29),
+ TCDCB_TLPM_WAIT_DM(30),
+ TCDCB_TLPM_WAIT_DM(31),
+#define GLPCI_WATMK_CLNT_PIPEMON 0x000BFD90
+ GLPCI_WATMK_CLNT_PIPEMON,
+#define GLPCI_CUR_CLNT_COMMON 0x000BFD84
+ GLPCI_CUR_CLNT_COMMON,
+#define GLPCI_CUR_CLNT_PIPEMON 0x000BFD88
+ GLPCI_CUR_CLNT_PIPEMON,
+#define GLPCI_PCIERR 0x0009DEB0
+ GLPCI_PCIERR,
+#define GLPSM_DEBUG_CTL_STATUS 0x000B0600
+ GLPSM_DEBUG_CTL_STATUS,
+#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0680
+ GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT,
+#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0684
+ GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT,
+#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW 0x000B0688
+ GLPSM0_DEBUG_DT_OUT_OF_WINDOW,
+#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT 0x000B069C
+ GLPSM0_DEBUG_INTF_HW_ERROR_DETECT,
+#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT 0x000B06A0
+ GLPSM0_DEBUG_MISC_HW_ERROR_DETECT,
+#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0E80
+ GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT,
+#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0E84
+ GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT,
+#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT 0x000B0E88
+ GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT,
+#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT 0x000B0E8C
+ GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT,
+#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT 0x000B0E90
+ GLPSM1_DEBUG_MISC_HW_ERROR_DETECT,
+#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT 0x000B1680
+ GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT,
+#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B1684
+ GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT,
+#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT 0x000B1688
+ GLPSM2_DEBUG_MISC_HW_ERROR_DETECT,
+#define GLTDPU_TCLAN_COMP_BOB(_i) (0x00049ADC + ((_i) * 4))
+ GLTDPU_TCLAN_COMP_BOB(1),
+ GLTDPU_TCLAN_COMP_BOB(2),
+ GLTDPU_TCLAN_COMP_BOB(3),
+ GLTDPU_TCLAN_COMP_BOB(4),
+ GLTDPU_TCLAN_COMP_BOB(5),
+ GLTDPU_TCLAN_COMP_BOB(6),
+ GLTDPU_TCLAN_COMP_BOB(7),
+ GLTDPU_TCLAN_COMP_BOB(8),
+#define GLTDPU_TCB_CMD_BOB(_i) (0x0004975C + ((_i) * 4))
+ GLTDPU_TCB_CMD_BOB(1),
+ GLTDPU_TCB_CMD_BOB(2),
+ GLTDPU_TCB_CMD_BOB(3),
+ GLTDPU_TCB_CMD_BOB(4),
+ GLTDPU_TCB_CMD_BOB(5),
+ GLTDPU_TCB_CMD_BOB(6),
+ GLTDPU_TCB_CMD_BOB(7),
+ GLTDPU_TCB_CMD_BOB(8),
+#define GLTDPU_PSM_UPDATE_BOB(_i) (0x00049B5C + ((_i) * 4))
+ GLTDPU_PSM_UPDATE_BOB(1),
+ GLTDPU_PSM_UPDATE_BOB(2),
+ GLTDPU_PSM_UPDATE_BOB(3),
+ GLTDPU_PSM_UPDATE_BOB(4),
+ GLTDPU_PSM_UPDATE_BOB(5),
+ GLTDPU_PSM_UPDATE_BOB(6),
+ GLTDPU_PSM_UPDATE_BOB(7),
+ GLTDPU_PSM_UPDATE_BOB(8),
+#define GLTCB_CMD_IN_BOB(_i) (0x000AE288 + ((_i) * 4))
+ GLTCB_CMD_IN_BOB(1),
+ GLTCB_CMD_IN_BOB(2),
+ GLTCB_CMD_IN_BOB(3),
+ GLTCB_CMD_IN_BOB(4),
+ GLTCB_CMD_IN_BOB(5),
+ GLTCB_CMD_IN_BOB(6),
+ GLTCB_CMD_IN_BOB(7),
+ GLTCB_CMD_IN_BOB(8),
+#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i) (0x000FC148 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8),
+#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i) (0x000FC248 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8),
+#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i) (0x000FC1C8 + ((_i) * 4))
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8),
+#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i) (0x000FC188 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8),
+#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i) (0x000FC288 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8),
+#define PRTDCB_TCUPM_REG_CM(_i) (0x000BC360 + ((_i) * 4))
+ PRTDCB_TCUPM_REG_CM(0),
+ PRTDCB_TCUPM_REG_CM(1),
+ PRTDCB_TCUPM_REG_CM(2),
+ PRTDCB_TCUPM_REG_CM(3),
+#define PRTDCB_TCUPM_REG_DM(_i) (0x000BC3A0 + ((_i) * 4))
+ PRTDCB_TCUPM_REG_DM(0),
+ PRTDCB_TCUPM_REG_DM(1),
+ PRTDCB_TCUPM_REG_DM(2),
+ PRTDCB_TCUPM_REG_DM(3),
+#define PRTDCB_TLPM_REG_DM(_i) (0x000A0000 + ((_i) * 4))
+ PRTDCB_TLPM_REG_DM(0),
+ PRTDCB_TLPM_REG_DM(1),
+ PRTDCB_TLPM_REG_DM(2),
+ PRTDCB_TLPM_REG_DM(3),
};
struct ice_priv_flag {
@@ -1375,9 +1544,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_txq(vsi, j) {
tx_ring = READ_ONCE(vsi->tx_rings[j]);
- if (tx_ring) {
- data[i++] = tx_ring->stats.pkts;
- data[i++] = tx_ring->stats.bytes;
+ if (tx_ring && tx_ring->ring_stats) {
+ data[i++] = tx_ring->ring_stats->stats.pkts;
+ data[i++] = tx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1386,9 +1555,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_rxq(vsi, j) {
rx_ring = READ_ONCE(vsi->rx_rings[j]);
- if (rx_ring) {
- data[i++] = rx_ring->stats.pkts;
- data[i++] = rx_ring->stats.bytes;
+ if (rx_ring && rx_ring->ring_stats) {
+ data[i++] = rx_ring->ring_stats->stats.pkts;
+ data[i++] = rx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index d16738a3d3a7..a92dc9a16035 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -110,6 +110,9 @@
#define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
+#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index b3baf7c3f910..89f986a75cc8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -908,17 +908,5 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
return ice_ptype_lkup[ptype];
}
-#define ICE_LINK_SPEED_UNKNOWN 0
-#define ICE_LINK_SPEED_10MBPS 10
-#define ICE_LINK_SPEED_100MBPS 100
-#define ICE_LINK_SPEED_1000MBPS 1000
-#define ICE_LINK_SPEED_2500MBPS 2500
-#define ICE_LINK_SPEED_5000MBPS 5000
-#define ICE_LINK_SPEED_10000MBPS 10000
-#define ICE_LINK_SPEED_20000MBPS 20000
-#define ICE_LINK_SPEED_25000MBPS 25000
-#define ICE_LINK_SPEED_40000MBPS 40000
-#define ICE_LINK_SPEED_50000MBPS 50000
-#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 7276badfa19e..94aa834cd9a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -448,6 +448,49 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
}
/**
+ * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
+ * @vsi: VSI pointer
+ */
+static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+ if (!pf->vsi_stats)
+ return -ENOENT;
+
+ vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
+ if (!vsi_stat)
+ return -ENOMEM;
+
+ vsi_stat->tx_ring_stats =
+ kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
+ GFP_KERNEL);
+ if (!vsi_stat->tx_ring_stats)
+ goto err_alloc_tx;
+
+ vsi_stat->rx_ring_stats =
+ kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
+ GFP_KERNEL);
+ if (!vsi_stat->rx_ring_stats)
+ goto err_alloc_rx;
+
+ pf->vsi_stats[vsi->idx] = vsi_stat;
+
+ return 0;
+
+err_alloc_rx:
+ kfree(vsi_stat->rx_ring_stats);
+err_alloc_tx:
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+ return -ENOMEM;
+}
+
+/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @vsi_type: type of VSI
@@ -560,6 +603,11 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
if (vsi->type == ICE_VSI_CTRL && vf)
vf->ctrl_vsi_idx = vsi->idx;
+
+ /* allocate memory for Tx/Rx ring stat pointers */
+ if (ice_vsi_alloc_stat_arrays(vsi))
+ goto err_rings;
+
goto unlock_pf;
err_rings:
@@ -1536,6 +1584,106 @@ err_out:
}
/**
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ ice_for_each_alloc_txq(vsi, i) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+
+ ice_for_each_alloc_rxq(vsi, i) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat->rx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI which is having stats allocated
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
+ struct ice_vsi_stats *vsi_stats;
+ struct ice_pf *pf = vsi->back;
+ u16 i;
+
+ vsi_stats = pf->vsi_stats[vsi->idx];
+ tx_ring_stats = vsi_stats->tx_ring_stats;
+ rx_ring_stats = vsi_stats->rx_ring_stats;
+
+ /* Allocate Tx ring stats */
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_tx_ring *ring;
+
+ ring = vsi->tx_rings[i];
+ ring_stats = tx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(tx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ /* Allocate Rx ring stats */
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *ring;
+
+ ring = vsi->rx_rings[i];
+ ring_stats = rx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(rx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_free_stats(vsi);
+ return -ENOMEM;
+}
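The kfree_rcu()/WRITE_ONCE() pairing in ice_vsi_free_stats() above exists for readers that can race with a VSI rebuild; the ethtool and ndo_get_stats64 hunks elsewhere in this patch re-check the per-ring pointer before dereferencing it. A hedged sketch of that reader pattern (the function name is illustrative):

static u64 demo_ring_pkts(struct ice_tx_ring *tx_ring)
{
	struct ice_ring_stats *rs = READ_ONCE(tx_ring->ring_stats);

	/* ring_stats may be torn down by a rebuild; treat NULL as zero */
	return rs ? rs->stats.pkts : 0;
}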
+
+/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1795,11 +1943,15 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
{
struct ice_eth_stats *prev_es, *cur_es;
struct ice_hw *hw = &vsi->back->hw;
+ struct ice_pf *pf = vsi->back;
u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
+ if (ice_is_reset_in_progress(pf->state))
+ vsi->stat_offsets_loaded = false;
+
ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_bytes, &cur_es->rx_bytes);
@@ -2576,6 +2728,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
ice_vsi_map_rings_to_vectors(vsi);
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -2614,6 +2770,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -2627,6 +2786,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto unroll_vsi_init;
+
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
break;
default:
/* clean up the resources and exit */
@@ -2686,6 +2850,7 @@ unroll_vector_base:
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
+ ice_vsi_free_stats(vsi);
ice_vsi_delete(vsi);
unroll_get_qs:
ice_vsi_put_qs(vsi);
@@ -3077,7 +3242,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
ice_vsi_clear_rings(vsi);
-
+ ice_vsi_free_stats(vsi);
ice_vsi_put_qs(vsi);
/* retain SW VSI data structure since it is needed to unregister and
@@ -3205,6 +3370,47 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
/**
+ * ice_vsi_realloc_stat_arrays - Frees unused stat structures
+ * @vsi: VSI pointer
+ * @prev_txq: Number of Tx rings before ring reallocation
+ * @prev_rxq: Number of Rx rings before ring reallocation
+ */
+static int
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (!prev_txq || !prev_rxq)
+ return 0;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+
+ if (vsi->num_txq < prev_txq) {
+ for (i = vsi->num_txq; i < prev_txq; i++) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+ }
+
+ if (vsi->num_rxq < prev_rxq) {
+ for (i = vsi->num_rxq; i < prev_rxq; i++) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuild
* @init_vsi: is this an initialization or a reconfigure of the VSI
@@ -3215,10 +3421,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_coalesce_stored *coalesce;
+ int ret, i, prev_txq, prev_rxq;
int prev_num_q_vectors = 0;
enum ice_vsi_type vtype;
struct ice_pf *pf;
- int ret, i;
if (!vsi)
return -EINVAL;
@@ -3237,6 +3443,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+ prev_txq = vsi->num_txq;
+ prev_rxq = vsi->num_rxq;
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (ret)
@@ -3303,7 +3512,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto err_vectors;
+
ice_vsi_map_rings_to_vectors(vsi);
+
+ vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
@@ -3340,6 +3555,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto err_vectors;
+
+ vsi->stat_offsets_loaded = false;
break;
case ICE_VSI_CHNL:
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3387,6 +3607,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
+
+ if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq))
+ goto err_vectors;
+
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
@@ -3728,9 +3952,9 @@ static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes
*/
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
- u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
- u64_stats_update_end(&tx_ring->syncp);
+ u64_stats_update_begin(&tx_ring->ring_stats->syncp);
+ ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_update_end(&tx_ring->ring_stats->syncp);
}
/**
@@ -3741,9 +3965,9 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
*/
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
- u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
- u64_stats_update_end(&rx_ring->syncp);
+ u64_stats_update_begin(&rx_ring->ring_stats->syncp);
+ ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
/**
@@ -3850,33 +4074,11 @@ int ice_clear_dflt_vsi(struct ice_vsi *vsi)
*/
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
- switch (vsi->port_info->phy.link_info.link_speed) {
- case ICE_AQ_LINK_SPEED_100GB:
- return SPEED_100000;
- case ICE_AQ_LINK_SPEED_50GB:
- return SPEED_50000;
- case ICE_AQ_LINK_SPEED_40GB:
- return SPEED_40000;
- case ICE_AQ_LINK_SPEED_25GB:
- return SPEED_25000;
- case ICE_AQ_LINK_SPEED_20GB:
- return SPEED_20000;
- case ICE_AQ_LINK_SPEED_10GB:
- return SPEED_10000;
- case ICE_AQ_LINK_SPEED_5GB:
- return SPEED_5000;
- case ICE_AQ_LINK_SPEED_2500MB:
- return SPEED_2500;
- case ICE_AQ_LINK_SPEED_1000MB:
- return SPEED_1000;
- case ICE_AQ_LINK_SPEED_100MB:
- return SPEED_100;
- case ICE_AQ_LINK_SPEED_10MB:
- return SPEED_10;
- case ICE_AQ_LINK_SPEED_UNKNOWN:
- default:
- return 0;
- }
+ unsigned int link_speed;
+
+ link_speed = vsi->port_info->phy.link_info.link_speed;
+
+ return (int)ice_get_link_speed(fls(link_speed) - 1);
}
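The removed switch becomes a table lookup: link_speed from the AQ is, by assumption here, a one-hot bitmask, so fls(link_speed) - 1 converts the set bit into an index for the new ice_get_link_speed() helper. If, say, the 25 Gb bit were BIT(7), fls() returns 8 and index 7 selects 25000 Mbps. An illustrative (not the driver's actual) version of the idea:

static const u32 demo_speed_tbl[] = {
	10, 100, 1000, 2500, 5000, 10000, 20000, 25000, 40000, 50000, 100000,
};

static u32 demo_speed_mbps(u16 aq_link_speed)	/* one-hot AQ speed mask */
{
	int idx = fls(aq_link_speed) - 1;

	if (idx < 0 || idx >= (int)ARRAY_SIZE(demo_speed_tbl))
		return 0;	/* unknown or unsupported speed */
	return demo_speed_tbl[idx];
}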
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index ca2898467dcb..2b23b4714a26 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -130,12 +130,17 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
ice_for_each_txq(vsi, i) {
struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+ struct ice_ring_stats *ring_stats;
if (!tx_ring)
continue;
if (ice_ring_ch_enabled(tx_ring))
continue;
+ ring_stats = tx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+
if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
@@ -144,8 +149,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* prev_pkt would be negative if there was no
* pending work.
*/
- packets = tx_ring->stats.pkts & INT_MAX;
- if (tx_ring->tx_stats.prev_pkt == packets) {
+ packets = ring_stats->stats.pkts & INT_MAX;
+ if (ring_stats->tx_stats.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
@@ -155,7 +160,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* to ice_get_tx_pending()
*/
smp_rmb();
- tx_ring->tx_stats.prev_pkt =
+ ring_stats->tx_stats.prev_pkt =
ice_get_tx_pending(tx_ring) ? packets : -1;
}
}
@@ -299,20 +304,6 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
}
/**
- * ice_get_devlink_port - Get devlink port from netdev
- * @netdev: the netdevice structure
- */
-static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
-
- if (!ice_is_switchdev_running(pf))
- return NULL;
-
- return &pf->devlink_port;
-}
-
-/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -2560,13 +2551,20 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
+ struct ice_ring_stats *ring_stats;
struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
-
if (!xdp_ring)
goto free_xdp_rings;
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats) {
+ ice_free_tx_ring(xdp_ring);
+ goto free_xdp_rings;
+ }
+
+ xdp_ring->ring_stats = ring_stats;
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
@@ -2589,9 +2587,13 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
return 0;
free_xdp_rings:
- for (; i >= 0; i--)
- if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ for (; i >= 0; i--) {
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
+ kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+ vsi->xdp_rings[i]->ring_stats = NULL;
ice_free_tx_ring(vsi->xdp_rings[i]);
+ }
+ }
return -ENOMEM;
}
@@ -2792,6 +2794,8 @@ free_qmap:
synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]);
}
+ kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+ vsi->xdp_rings[i]->ring_stats = NULL;
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
}
@@ -4603,6 +4607,7 @@ static int ice_register_netdev(struct ice_pf *pf)
if (err)
goto err_devlink_create;
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
err = register_netdev(vsi->netdev);
if (err)
goto err_register_netdev;
@@ -4611,8 +4616,6 @@ static int ice_register_netdev(struct ice_pf *pf)
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
-
return 0;
err_register_netdev:
ice_devlink_destroy_pf_port(pf);
@@ -4771,11 +4774,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
+ pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+ sizeof(*pf->vsi_stats), GFP_KERNEL);
+ if (!pf->vsi_stats) {
+ err = -ENOMEM;
+ goto err_init_vsi_unroll;
+ }
+
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_vsi_unroll;
+ goto err_init_vsi_stats_unroll;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4956,6 +4966,9 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_stats_unroll:
+ devm_kfree(dev, pf->vsi_stats);
+ pf->vsi_stats = NULL;
err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
@@ -5078,6 +5091,8 @@ static void ice_remove(struct pci_dev *pdev)
continue;
ice_vsi_free_q_vectors(pf->vsi[i]);
}
+ devm_kfree(&pdev->dev, pf->vsi_stats);
+ pf->vsi_stats = NULL;
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
@@ -6370,10 +6385,10 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
*pkts = stats.pkts;
*bytes = stats.bytes;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
}
/**
@@ -6395,14 +6410,16 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- if (!ring)
+ if (!ring || !ring->ring_stats)
continue;
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
+ ring->ring_stats->stats, &pkts,
+ &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->tx_stats.restart_q;
- vsi->tx_busy += ring->tx_stats.tx_busy;
- vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
+ vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
}
}
@@ -6412,6 +6429,7 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
+ struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
u64 pkts, bytes;
int i;
@@ -6436,12 +6454,16 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_ring_stats *ring_stats;
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ ring_stats = ring->ring_stats;
+ ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
+ ring_stats->stats, &pkts,
+ &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
- vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
- vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
+ vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
}
/* update XDP Tx rings counters */
@@ -6451,10 +6473,28 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_unlock();
- vsi->net_stats.tx_packets = vsi_stats->tx_packets;
- vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
- vsi->net_stats.rx_packets = vsi_stats->rx_packets;
- vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+ net_stats = &vsi->net_stats;
+ stats_prev = &vsi->net_stats_prev;
+
+ /* clear prev counters after reset */
+ if (vsi_stats->tx_packets < stats_prev->tx_packets ||
+ vsi_stats->rx_packets < stats_prev->rx_packets) {
+ stats_prev->tx_packets = 0;
+ stats_prev->tx_bytes = 0;
+ stats_prev->rx_packets = 0;
+ stats_prev->rx_bytes = 0;
+ }
+
+ /* update netdev counters */
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+
+ stats_prev->tx_packets = vsi_stats->tx_packets;
+ stats_prev->tx_bytes = vsi_stats->tx_bytes;
+ stats_prev->rx_packets = vsi_stats->rx_packets;
+ stats_prev->rx_bytes = vsi_stats->rx_bytes;
kfree(vsi_stats);
}
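A short worked example of the delta accounting above: suppose the netdev counter stands at 1000 packets and stats_prev also records 1000; after a reset the rebuilt rings report a total of only 50. Because 50 < stats_prev, the prev counters are zeroed, the netdev counter becomes 1000 + (50 - 0) = 1050, and stats_prev is reset to 50, so the values exposed to userspace stay monotonic even though the per-ring counters restart from zero.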
@@ -6516,6 +6556,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
+ if (ice_is_reset_in_progress(pf->state))
+ pf->stat_prev_loaded = false;
+
ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
&prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
@@ -8283,7 +8326,7 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
rule.rid = fltr->rid;
rule.rule_id = fltr->rule_id;
- rule.vsi_handle = fltr->dest_id;
+ rule.vsi_handle = fltr->dest_vsi_handle;
status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
if (status) {
if (status == -ENOENT)
@@ -8595,6 +8638,12 @@ static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
switch (mode) {
case TC_MQPRIO_MODE_CHANNEL:
+ if (pf->hw.port_info->is_custom_tx_enabled) {
+ dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
+ return -EBUSY;
+ }
+ ice_tear_down_devlink_rate_tree(pf);
+
ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
if (ret) {
netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
@@ -9108,5 +9157,4 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_get_devlink_port = ice_get_devlink_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 0f668468d141..13e75279e71c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1447,24 +1447,10 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_hw *hw = &pf->hw;
- u64 incval, diff;
- int neg_adj = 0;
+ u64 incval;
int err;
- incval = ice_base_incval(pf);
-
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
-
- diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm,
- 1000000ULL << 16);
- if (neg_adj)
- incval -= diff;
- else
- incval += diff;
-
+ incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
err = ice_ptp_write_incval_locked(hw, incval);
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
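For reference, the generic helper is expected to perform the same computation the removed lines open-coded: scaled_ppm is a signed ppm value with 16 fractional bits, so the new increment is incval +/- incval * |scaled_ppm| / (1000000 * 2^16). A scaled_ppm of 65536 (1 ppm) therefore moves incval by one part per million; with incval = 0x100000000 that is roughly 4295 counts.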
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 772b1f566d6e..1f8dd50db524 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -2963,16 +2963,18 @@ bool ice_ptp_lock(struct ice_hw *hw)
u32 hw_lock;
int i;
-#define MAX_TRIES 5
+#define MAX_TRIES 15
for (i = 0; i < MAX_TRIES; i++) {
hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
- if (!hw_lock)
- break;
+ if (hw_lock) {
+ /* Somebody is holding the lock */
+ usleep_range(5000, 6000);
+ continue;
+ }
- /* Somebody is holding the lock */
- usleep_range(10000, 20000);
+ break;
}
return !hw_lock;
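In terms of worst-case wait, the old loop polled the PTP semaphore up to 5 times with 10-20 ms sleeps (about 50-100 ms total); the new one polls up to 15 times with 5-6 ms sleeps (about 75-90 ms total), so an uncontended lock is noticed sooner while the overall budget stays in the same range.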
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index bd31748aae1b..fd1f8b0ad0ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -6,6 +6,7 @@
#include "ice_devlink.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
+#include "ice_dcb_lib.h"
/**
* ice_repr_get_sw_port_id - get port ID associated with representor
@@ -134,14 +135,6 @@ static int ice_repr_stop(struct net_device *netdev)
return 0;
}
-static struct devlink_port *
-ice_repr_get_devlink_port(struct net_device *netdev)
-{
- struct ice_repr *repr = ice_netdev_to_repr(netdev);
-
- return &repr->vf->devlink_port;
-}
-
/**
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
@@ -163,18 +156,20 @@ ice_repr_sp_stats64(const struct net_device *dev,
u64 pkts, bytes;
tx_ring = np->vsi->tx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+ ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
+ tx_ring->ring_stats->stats,
&pkts, &bytes);
stats->rx_packets = pkts;
stats->rx_bytes = bytes;
rx_ring = np->vsi->rx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+ ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
+ rx_ring->ring_stats->stats,
&pkts, &bytes);
stats->tx_packets = pkts;
stats->tx_bytes = bytes;
- stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
- rx_ring->rx_stats.alloc_buf_failed;
+ stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed;
return 0;
}
@@ -250,7 +245,6 @@ static const struct net_device_ops ice_repr_netdev_ops = {
.ndo_open = ice_repr_open,
.ndo_stop = ice_repr_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
- .ndo_get_devlink_port = ice_repr_get_devlink_port,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
@@ -339,12 +333,11 @@ static int ice_repr_add(struct ice_vf *vf)
repr->netdev->max_mtu = ICE_MAX_MTU;
SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
err = ice_repr_reg_netdev(repr->netdev);
if (err)
goto err_netdev;
- devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
-
ice_virtchnl_set_repr_ops(vf);
return 0;
@@ -399,6 +392,7 @@ static void ice_repr_rem(struct ice_vf *vf)
*/
void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
{
+ struct devlink *devlink;
struct ice_vf *vf;
unsigned int bkt;
@@ -406,6 +400,14 @@ void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf)
ice_repr_rem(vf);
+
+ /* since all port representors are destroyed, there is
+ * no point in keeping the nodes
+ */
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
+ devl_rate_nodes_destroy(devlink);
+ devl_unlock(devlink);
}
/**
@@ -414,6 +416,7 @@ void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
*/
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
+ struct devlink *devlink;
struct ice_vf *vf;
unsigned int bkt;
int err;
@@ -426,6 +429,13 @@ int ice_repr_add_for_all_vfs(struct ice_pf *pf)
goto err;
}
+ /* only set up the devlink-rate Tx topology if ADQ and DCB are disabled */
+ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
+ return 0;
+
+ devlink = priv_to_devlink(pf);
+ ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
+
return 0;
err:
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 118595763bba..6d08b397df2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
+#include <net/devlink.h>
#include "ice_sched.h"
/**
@@ -142,12 +143,14 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
* @pi: port information structure
* @layer: Scheduler layer of the node
* @info: Scheduler element information from firmware
+ * @prealloc_node: preallocated ice_sched_node struct for SW DB
*
* This function inserts a scheduler node to the SW DB.
*/
int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
- struct ice_aqc_txsched_elem_data *info)
+ struct ice_aqc_txsched_elem_data *info,
+ struct ice_sched_node *prealloc_node)
{
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
@@ -176,7 +179,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (status)
return status;
- node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
+ if (prealloc_node)
+ node = prealloc_node;
+ else
+ node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
if (hw->max_children[layer]) {
@@ -355,6 +361,9 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
/* leaf nodes have no children */
if (node->children)
devm_kfree(ice_hw_to_dev(hw), node->children);
+
+ kfree(node->name);
+ xa_erase(&pi->sched_node_ids, node->id);
devm_kfree(ice_hw_to_dev(hw), node);
}
@@ -872,13 +881,15 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
* @num_nodes: number of nodes
* @num_nodes_added: pointer to num nodes added
* @first_node_teid: if new nodes are added then return the TEID of first node
+ * @prealloc_nodes: preallocated ice_sched_node structs for the software DB
*
* This function add nodes to HW as well as to SW DB for a given layer
*/
-static int
+int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
- u16 *num_nodes_added, u32 *first_node_teid)
+ u16 *num_nodes_added, u32 *first_node_teid,
+ struct ice_sched_node **prealloc_nodes)
{
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
@@ -924,7 +935,11 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*num_nodes_added = num_nodes;
/* add nodes to the SW DB */
for (i = 0; i < num_nodes; i++) {
- status = ice_sched_add_node(pi, layer, &buf->generic[i]);
+ if (prealloc_nodes)
+ status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
+ else
+ status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
+
if (status) {
ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
@@ -940,6 +955,22 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
new_node->sibling = NULL;
new_node->tc_num = tc_node->tc_num;
+ new_node->tx_weight = ICE_SCHED_DFLT_BW_WT;
+ new_node->tx_share = ICE_SCHED_DFLT_BW;
+ new_node->tx_max = ICE_SCHED_DFLT_BW;
+ new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL);
+ if (!new_node->name)
+ return -ENOMEM;
+
+ status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX),
+ GFP_KERNEL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n",
+ status);
+ break;
+ }
+
+ snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id);
/* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */
@@ -1003,7 +1034,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
- num_nodes_added, first_node_teid);
+ num_nodes_added, first_node_teid, NULL);
}
/**
@@ -1268,7 +1299,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
ICE_AQC_ELEM_TYPE_ENTRY_POINT)
hw->sw_entry_point_layer = j;
- status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
+ status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
if (status)
goto err_init_port;
}
@@ -2154,7 +2185,7 @@ ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
* This function removes the child from the old parent and adds it to a new
* parent
*/
-static void
+void
ice_sched_update_parent(struct ice_sched_node *new_parent,
struct ice_sched_node *node)
{
@@ -2188,7 +2219,7 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
* This function move the child nodes to a given parent.
*/
-static int
+int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
@@ -3560,7 +3591,7 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
-static int
+int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
@@ -3597,6 +3628,57 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
}
/**
+ * ice_sched_set_node_priority - set node's priority
+ * @pi: port information structure
+ * @node: tree node
+ * @priority: number 0-7 representing priority among siblings
+ *
+ * This function sets the priority of a node among its siblings.
+ */
+int
+ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+ u16 priority)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+
+ data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority);
+
+ return ice_sched_update_elem(pi->hw, node, &buf);
+}
+
+/**
+ * ice_sched_set_node_weight - set node's weight
+ * @pi: port information structure
+ * @node: tree node
+ * @weight: number 1-200 representing weight for WFQ
+ *
+ * This function sets the weight of the node for the WFQ algorithm.
+ */
+int
+ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+
+ data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
+ ICE_AQC_ELEM_VALID_GENERIC;
+ data->cir_bw.bw_alloc = cpu_to_le16(weight);
+ data->eir_bw.bw_alloc = cpu_to_le16(weight);
+
+ data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0);
+
+ return ice_sched_update_elem(pi->hw, node, &buf);
+}
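Since both the CIR and EIR bw_alloc fields receive the same value and the strict-priority bit is cleared, the node takes part in weighted arbitration rather than a fixed-priority one; assuming the firmware treats bw_alloc proportionally (which is what WFQ implies), two sibling nodes configured with weights 50 and 150 would split contended bandwidth roughly 1:3.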
+
+/**
* ice_sched_set_node_bw_lmt - set node's BW limit
* @pi: port information structure
* @node: tree node
@@ -3606,7 +3688,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
* It updates node's BW limit parameters like BW RL profile ID of type CIR,
* EIR, or SRL. The caller needs to hold scheduler lock.
*/
-static int
+int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 4f91577fed56..9c100747445a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -6,6 +6,8 @@
#include "ice_common.h"
+#define SCHED_NODE_NAME_MAX_LEN 32
+
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_AGG_LAYER_OFFSET 6
@@ -69,6 +71,29 @@ int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
+
+int
+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw);
+
+int
+ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num);
+
+int
+ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer, u16 num_nodes,
+ u16 *num_nodes_added, u32 *first_node_teid,
+ struct ice_sched_node **prealloc_node);
+
+int
+ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
+ u16 num_items, u32 *list);
+
+int ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+ u16 priority);
+int ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight);
+
int ice_sched_init_port(struct ice_port_info *pi);
int ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
@@ -81,7 +106,11 @@ struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
- struct ice_aqc_txsched_elem_data *info);
+ struct ice_aqc_txsched_elem_data *info,
+ struct ice_sched_node *prealloc_node);
+void
+ice_sched_update_parent(struct ice_sched_node *new_parent,
+ struct ice_sched_node *node);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index f68c555be4e9..faba0f857cd9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -724,7 +724,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
*/
fltr->rid = rule_added.rid;
fltr->rule_id = rule_added.rule_id;
- fltr->dest_id = rule_added.vsi_handle;
+ fltr->dest_vsi_handle = rule_added.vsi_handle;
exit:
kfree(list);
@@ -732,6 +732,116 @@ exit:
}
/**
+ * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to tc_flower_filter
+ *
+ * Locate the VSI using the specified queue. When ADQ is not enabled, always
+ * return the input VSI; otherwise locate the corresponding VSI based on the
+ * per-channel offset and qcount
+ */
+static struct ice_vsi *
+ice_locate_vsi_using_queue(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ int num_tc, tc, queue;
+
+ /* if ADQ is not active, passed VSI is the candidate VSI */
+ if (!ice_is_adq_active(vsi->back))
+ return vsi;
+
+ /* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
+ * upon queue number)
+ */
+ num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ queue = tc_fltr->action.fwd.q.queue;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ int qcount = vsi->mqprio_qopt.qopt.count[tc];
+ int offset = vsi->mqprio_qopt.qopt.offset[tc];
+
+ if (queue >= offset && queue < offset + qcount) {
+ /* for non-ADQ TCs, passed VSI is the candidate VSI */
+ if (tc < ICE_CHNL_START_TC)
+ return vsi;
+ else
+ return vsi->tc_map_vsi[tc];
+ }
+ }
+ return NULL;
+}
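A worked example of the lookup above, assuming ICE_CHNL_START_TC marks the first ADQ traffic class (TC 0 staying on the main VSI): with num_tc = 3, offset = {0, 4, 8} and count = {4, 4, 8}, queue 2 lands in TC 0 so the input VSI is returned, queue 6 lands in TC 1 so vsi->tc_map_vsi[1] (the channel VSI) is returned, and any queue at or above 16 matches no TC and NULL is returned.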
+
+static struct ice_rx_ring *
+ice_locate_rx_ring_using_queue(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ u16 queue = tc_fltr->action.fwd.q.queue;
+
+ return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;
+}
+
+/**
+ * ice_tc_forward_action - Determine destination VSI and queue for the action
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to TC flower filter structure
+ *
+ * Validates the tc forward action and determines the destination VSI and queue
+ * for the forward action.
+ */
+static struct ice_vsi *
+ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
+{
+ struct ice_rx_ring *ring = NULL;
+ struct ice_vsi *ch_vsi = NULL;
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ u32 tc_class;
+
+ dev = ice_pf_to_dev(pf);
+
+ /* Get the destination VSI and/or destination queue and validate them */
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ tc_class = tc_fltr->action.fwd.tc.tc_class;
+ /* Select the destination VSI */
+ if (tc_class < ICE_CHNL_START_TC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter because of unsupported destination");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ /* Locate ADQ VSI depending on hw_tc number */
+ ch_vsi = vsi->tc_map_vsi[tc_class];
+ break;
+ case ICE_FWD_TO_Q:
+ /* Locate the Rx queue */
+ ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr);
+ if (!ring) {
+ dev_err(dev,
+ "Unable to locate Rx queue for action fwd_to_queue: %u\n",
+ tc_fltr->action.fwd.q.queue);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Determine destination VSI even though the action is
+ * FWD_TO_QUEUE, because QUEUE is associated with VSI
+ */
+ ch_vsi = tc_fltr->dest_vsi;
+ break;
+ default:
+ dev_err(dev,
+ "Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
+ tc_fltr->action.fltr_act);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */
+ if (!ch_vsi) {
+ dev_err(dev,
+ "Unable to add filter because specified destination VSI doesn't exist\n");
+ return ERR_PTR(-EINVAL);
+ }
+ return ch_vsi;
+}
+
+/**
* ice_add_tc_flower_adv_fltr - add appropriate filter rules
* @vsi: Pointer to VSI
* @tc_fltr: Pointer to TC flower filter structure
@@ -772,11 +882,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return -EOPNOTSUPP;
}
- /* get the channel (aka ADQ VSI) */
- if (tc_fltr->dest_vsi)
- ch_vsi = tc_fltr->dest_vsi;
- else
- ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
+ /* validate forwarding action VSI and queue */
+ ch_vsi = ice_tc_forward_action(vsi, tc_fltr);
+ if (IS_ERR(ch_vsi))
+ return PTR_ERR(ch_vsi);
lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
@@ -790,30 +899,40 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
}
rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
- if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
- if (!ch_vsi) {
- NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
- ret = -EINVAL;
- goto exit;
- }
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = tc_fltr->cookie;
- rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
rule_info.sw_act.vsi_handle = ch_vsi->idx;
- rule_info.priority = 7;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
- tc_fltr->action.tc_class,
+ tc_fltr->action.fwd.tc.tc_class,
rule_info.sw_act.vsi_handle, lkups_cnt);
- } else {
+ break;
+ case ICE_FWD_TO_Q:
+ /* HW queue number in global space */
+ rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
+ rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
+ tc_fltr->action.fwd.q.queue,
+ tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
+ break;
+ default:
rule_info.sw_act.flag |= ICE_FLTR_TX;
+ /* In case of Tx (LOOKUP_TX), src needs to be src VSI */
rule_info.sw_act.src = vsi->idx;
+ /* 'rx' is false, i.e. the rule direction is Tx (LOOKUP_TX) */
rule_info.rx = false;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
+ break;
}
- /* specify the cookie as filter_rule_id */
- rule_info.fltr_rule_id = tc_fltr->cookie;
-
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
@@ -831,19 +950,14 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
*/
tc_fltr->rid = rule_added.rid;
tc_fltr->rule_id = rule_added.rule_id;
- if (tc_fltr->action.tc_class > 0 && ch_vsi) {
- /* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
- * for PF ADQ filter, it is not yet set in tc_fltr,
- * hence store the dest_vsi ptr in tc_fltr
- */
- if (ch_vsi->type == ICE_VSI_CHNL)
- tc_fltr->dest_vsi = ch_vsi;
+ tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
+ if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
+ tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
+ tc_fltr->dest_vsi = ch_vsi;
/* keep track of advanced switch filter for
- * destination VSI (channel VSI)
+ * destination VSI
*/
ch_vsi->num_chnl_fltr++;
- /* in this case, dest_id is VSI handle (sw handle) */
- tc_fltr->dest_id = rule_added.vsi_handle;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
@@ -851,10 +965,22 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
pf->num_dmac_chnl_fltrs++;
}
- dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
- lkups_cnt, flags,
- tc_fltr->action.tc_class, rule_added.rid,
- rule_added.rule_id, rule_added.vsi_handle);
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
+ lkups_cnt, flags,
+ tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
+ rule_added.rule_id, rule_added.vsi_handle);
+ break;
+ case ICE_FWD_TO_Q:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u) , rid %u, rule_id %u\n",
+ lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
+ tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
+ rule_added.rule_id);
+ break;
+ default:
+ break;
+ }
exit:
kfree(list);
return ret;
@@ -1455,43 +1581,15 @@ ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
}
/**
- * ice_handle_tclass_action - Support directing to a traffic class
+ * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to TC flower offload structure
* @fltr: Pointer to TC flower filter structure
*
- * Support directing traffic to a traffic class
+ * Prepare ADQ filter with the required additional header fields
*/
static int
-ice_handle_tclass_action(struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower,
- struct ice_tc_flower_fltr *fltr)
+ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
- struct ice_vsi *main_vsi;
-
- if (tc < 0) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
- return -EINVAL;
- }
- if (!tc) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
- return -EINVAL;
- }
-
- if (!(vsi->all_enatc & BIT(tc))) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination");
- return -EINVAL;
- }
-
- /* Redirect to a TC class or Queue Group */
- main_vsi = ice_get_main_vsi(vsi->back);
- if (!main_vsi || !main_vsi->netdev) {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unable to add filter because of invalid netdevice");
- return -EINVAL;
- }
-
if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
(fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_SRC_MAC))) {
@@ -1503,9 +1601,8 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
/* For ADQ, filter must include dest MAC address, otherwise unwanted
* packets with unrelated MAC address get delivered to ADQ VSIs as long
* as remaining filter criteria is satisfied such as dest IP address
- * and dest/src L4 port. Following code is trying to handle:
- * 1. For non-tunnel, if user specify MAC addresses, use them (means
- * this code won't do anything
+ * and dest/src L4 port. The code below handles the following cases:
+ * 1. For non-tunnel, if the user specifies MAC addresses, use them.
* 2. For non-tunnel, if user didn't specify MAC address, add implicit
* dest MAC to be lower netdev's active unicast MAC address
* 3. For tunnel, as of now TC-filter through flower classifier doesn't
@@ -1528,35 +1625,97 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
}
- /* validate specified dest MAC address, make sure either it belongs to
- * lower netdev or any of MACVLAN. MACVLANs MAC address are added as
- * unicast MAC filter destined to main VSI.
- */
- if (!ice_mac_fltr_exist(&main_vsi->back->hw,
- fltr->outer_headers.l2_key.dst_mac,
- main_vsi->idx)) {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
- return -EINVAL;
- }
-
/* Make sure VLAN is already added to main VSI, before allowing ADQ to
* add a VLAN based filter such as MAC + VLAN + L4 port.
*/
if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
- if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
- main_vsi->idx)) {
+ if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
return -EINVAL;
}
}
+ return 0;
+}
+
+/**
+ * ice_handle_tclass_action - Support directing to a traffic class
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Support directing traffic to a traffic class/queue-set
+ */
+static int
+ice_handle_tclass_action(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+
+ /* user specified hw_tc (must be non-zero for ADQ TC), action is forward
+ * to hw_tc (i.e. ADQ channel number)
+ */
+ if (tc < ICE_CHNL_START_TC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of unsupported destination");
+ return -EOPNOTSUPP;
+ }
+ if (!(vsi->all_enatc & BIT(tc))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of non-existence destination");
+ return -EINVAL;
+ }
fltr->action.fltr_act = ICE_FWD_TO_VSI;
- fltr->action.tc_class = tc;
+ fltr->action.fwd.tc.tc_class = tc;
- return 0;
+ return ice_prep_adq_filter(vsi, fltr);
+}
+
+static int
+ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ struct ice_vsi *ch_vsi = NULL;
+ u16 queue = act->rx_queue;
+
+ if (queue >= vsi->num_rxq) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because specified queue is invalid");
+ return -EINVAL;
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_Q;
+ fltr->action.fwd.q.queue = queue;
+ /* determine corresponding HW queue */
+ fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];
+
+ /* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare
+ * ADQ switch filter
+ */
+ ch_vsi = ice_locate_vsi_using_queue(vsi, fltr);
+ if (!ch_vsi)
+ return -EINVAL;
+ fltr->dest_vsi = ch_vsi;
+ if (!ice_is_chnl_fltr(fltr))
+ return 0;
+
+ return ice_prep_adq_filter(vsi, fltr);
+}
+
+static int
+ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ switch (act->id) {
+ case FLOW_ACTION_RX_QUEUE_MAPPING:
+ /* forward to queue */
+ return ice_tc_forward_to_queue(vsi, fltr, act);
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -1575,7 +1734,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
struct flow_action *flow_action = &rule->action;
struct flow_action_entry *act;
- int i;
+ int i, err;
if (cls_flower->classid)
return ice_handle_tclass_action(vsi, cls_flower, fltr);
@@ -1584,21 +1743,13 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
return -EINVAL;
flow_action_for_each(i, act, flow_action) {
- if (ice_is_eswitch_mode_switchdev(vsi->back)) {
- int err = ice_eswitch_tc_parse_action(fltr, act);
-
- if (err)
- return err;
- continue;
- }
- /* Allow only one rule per filter */
-
- /* Drop action */
- if (act->id == FLOW_ACTION_DROP) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
- return -EINVAL;
- }
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+ err = ice_eswitch_tc_parse_action(fltr, act);
+ else
+ err = ice_tc_parse_action(vsi, fltr, act);
+ if (err)
+ return err;
+ continue;
}
return 0;
}
@@ -1618,7 +1769,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
- rule_rem.vsi_handle = fltr->dest_id;
+ rule_rem.vsi_handle = fltr->dest_vsi_handle;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) {
if (err == -ENOENT) {
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 92642faad595..d916d1e92aa3 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -45,7 +45,20 @@ struct ice_indr_block_priv {
};
struct ice_tc_flower_action {
- u32 tc_class;
+ /* forward action specific params */
+ union {
+ struct {
+ u32 tc_class; /* forward to hw_tc */
+ u32 rsvd;
+ } tc;
+ struct {
+ u16 queue; /* forward to queue */
+ /* To add filter in HW, absolute queue number in global
+ * space of queues (between 0...N) is needed
+ */
+ u16 hw_queue;
+ } q;
+ } fwd;
enum ice_sw_fwd_act_type fltr_act;
};
@@ -131,11 +144,11 @@ struct ice_tc_flower_fltr {
*/
u16 rid;
u16 rule_id;
- /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon
- * destination type
+ /* VSI handle of the destination VSI (it could be main PF VSI, CHNL_VSI,
+ * VF VSI)
*/
- u16 dest_id;
- /* if dest_id is vsi_idx, then need to store destination VSI ptr */
+ u16 dest_vsi_handle;
+ /* ptr to destination VSI */
struct ice_vsi *dest_vsi;
/* direction of fltr for eswitch use case */
enum ice_eswitch_fltr_direction direction;
@@ -162,12 +175,23 @@ struct ice_tc_flower_fltr {
* @f: Pointer to tc-flower filter
*
* Criteria to determine of given filter is valid channel filter
- * or not is based on its "destination". If destination is hw_tc (aka tc_class)
- * and it is non-zero, then it is valid channel (aka ADQ) filter
+ * or not is based on its destination.
+ * For forward to VSI action, if destination is valid hw_tc (aka tc_class)
+ * and in supported range of TCs for ADQ, then return true.
+ * For forward to queue, as long as dest_vsi is valid and it is of type
+ * VSI_CHNL (PF ADQ VSI is of type VSI_CHNL), return true.
+ * NOTE: For forward to queue, correct dest_vsi is still set in tc_fltr based
+ * on destination queue specified.
*/
static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
{
- return !!f->action.tc_class;
+ if (f->action.fltr_act == ICE_FWD_TO_VSI)
+ return f->action.fwd.tc.tc_class >= ICE_CHNL_START_TC &&
+ f->action.fwd.tc.tc_class < ICE_CHNL_MAX_TC;
+ else if (f->action.fltr_act == ICE_FWD_TO_Q)
+ return f->dest_vsi && f->dest_vsi->type == ICE_VSI_CHNL;
+
+ return false;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dbe80e5053a8..086f0b3ab68d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -325,7 +325,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
netif_tx_wake_queue(txring_txq(tx_ring));
- ++tx_ring->tx_stats.restart_q;
+ ++tx_ring->ring_stats->tx_stats.restart_q;
}
}
@@ -367,7 +367,7 @@ int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->tx_stats.prev_pkt = -1;
+ tx_ring->ring_stats->tx_stats.prev_pkt = -1;
return 0;
err:
@@ -667,7 +667,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
@@ -680,7 +680,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ice_rx_pg_order(rx_ring));
- rx_ring->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
@@ -1091,7 +1091,7 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
- rx_ring->rx_stats.non_eop_descs++;
+ rx_ring->ring_stats->rx_stats.non_eop_descs++;
return true;
}
@@ -1222,7 +1222,7 @@ construct_skb:
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->rx_stats.alloc_buf_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
if (rx_buf)
rx_buf->pagecnt_bias++;
break;
@@ -1275,7 +1275,9 @@ construct_skb:
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
rx_ring->skb = skb;
- ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
+ if (rx_ring->ring_stats)
+ ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
+ total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
@@ -1292,15 +1294,25 @@ static void __ice_update_sample(struct ice_q_vector *q_vector,
struct ice_tx_ring *tx_ring;
ice_for_each_tx_ring(tx_ring, *rc) {
- packets += tx_ring->stats.pkts;
- bytes += tx_ring->stats.bytes;
+ struct ice_ring_stats *ring_stats;
+
+ ring_stats = tx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+ packets += ring_stats->stats.pkts;
+ bytes += ring_stats->stats.bytes;
}
} else {
struct ice_rx_ring *rx_ring;
ice_for_each_rx_ring(rx_ring, *rc) {
- packets += rx_ring->stats.pkts;
- bytes += rx_ring->stats.bytes;
+ struct ice_ring_stats *ring_stats;
+
+ ring_stats = rx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+ packets += ring_stats->stats.pkts;
+ bytes += ring_stats->stats.bytes;
}
}
@@ -1549,7 +1561,7 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_tx_start_queue(txring_txq(tx_ring));
- ++tx_ring->tx_stats.restart_q;
+ ++tx_ring->ring_stats->tx_stats.restart_q;
return 0;
}
@@ -2293,7 +2305,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
if (__skb_linearize(skb))
goto out_drop;
count = ice_txd_use_count(skb->len);
- tx_ring->tx_stats.tx_linearize++;
+ tx_ring->ring_stats->tx_stats.tx_linearize++;
}
/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
@@ -2304,7 +2316,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
*/
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
- tx_ring->tx_stats.tx_busy++;
+ tx_ring->ring_stats->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 932b5661ec4d..4fd0e5d0a313 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -191,6 +191,16 @@ struct ice_rxq_stats {
u64 alloc_buf_failed;
};
+struct ice_ring_stats {
+ struct rcu_head rcu; /* to avoid race on free */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ice_txq_stats tx_stats;
+ struct ice_rxq_stats rx_stats;
+ };
+};
+
enum ice_ring_state_t {
ICE_TX_XPS_INIT_DONE,
ICE_TX_NBITS,
@@ -283,9 +293,7 @@ struct ice_rx_ring {
u16 rx_buf_len;
/* stats structs */
- struct ice_rxq_stats rx_stats;
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
+ struct ice_ring_stats *ring_stats;
struct rcu_head rcu; /* to avoid race on free */
/* CL4 - 3rd cacheline starts here */
@@ -325,10 +333,8 @@ struct ice_tx_ring {
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
/* stats structs */
- struct ice_txq_stats tx_stats;
+ struct ice_ring_stats *ring_stats;
/* CL3 - 3rd cacheline starts here */
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7ee38d02d1e5..25f04266c668 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -285,7 +285,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
ice_clean_xdp_irq(xdp_ring);
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index e1abfcee96dc..e3f622cad425 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -524,7 +524,14 @@ struct ice_sched_node {
struct ice_sched_node *sibling; /* next sibling in the same layer */
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
+ char *name;
+ struct devlink_rate *rate_node;
+ u64 tx_max;
+ u64 tx_share;
u32 agg_id; /* aggregator group ID */
+ u32 id;
+ u32 tx_priority;
+ u32 tx_weight;
u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
@@ -706,7 +713,9 @@ struct ice_port_info {
/* List contain profile ID(s) and other params per layer */
struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_qos_cfg qos_cfg;
+ struct xarray sched_node_ids;
u8 is_vf:1;
+ u8 is_custom_tx_enabled:1;
};
struct ice_switch_info {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 1c51778db951..375eb6493f0f 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -700,6 +700,30 @@ void ice_dis_vf_qs(struct ice_vf *vf)
}
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @err: error return code
+ */
+enum virtchnl_status_code ice_err_to_virt_err(int err)
+{
+ switch (err) {
+ case 0:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case -EINVAL:
+ case -ENODEV:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case -ENOMEM:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case -EALREADY:
+ case -EBUSY:
+ case -EIO:
+ case -ENOSPC:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
+
+/**
* ice_check_vf_init - helper to check if VF init complete
* @vf: the pointer to the VF to check
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 15887e772c76..9c8ef2b01f0f 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -25,6 +25,7 @@
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
+enum virtchnl_status_code ice_err_to_virt_err(int err);
struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);
int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
bool ice_is_vf_trusted(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index fc8c93fa4455..d4a4001b6e5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -39,6 +39,24 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
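+/* indexed by fls(ICE_AQ_LINK_SPEED_*) - 1; each entry is the closest
+ * legacy virtchnl speed for that AQ link speed bit
+ */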
+static const u32 ice_legacy_aq_to_vc_speed[15] = {
+ VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
+ VIRTCHNL_LINK_SPEED_100MB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_10GB,
+ VIRTCHNL_LINK_SPEED_20GB,
+ VIRTCHNL_LINK_SPEED_25GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_UNKNOWN,
+ VIRTCHNL_LINK_SPEED_UNKNOWN,
+ VIRTCHNL_LINK_SPEED_UNKNOWN,
+ VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
+};
+
/**
* ice_conv_link_speed_to_virtchnl
* @adv_link_support: determines the format of the returned link speed
@@ -55,79 +73,17 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
u32 speed;
- if (adv_link_support)
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- speed = ICE_LINK_SPEED_10MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100MB:
- speed = ICE_LINK_SPEED_100MBPS;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- speed = ICE_LINK_SPEED_1000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- speed = ICE_LINK_SPEED_2500MBPS;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- speed = ICE_LINK_SPEED_5000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = ICE_LINK_SPEED_10000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = ICE_LINK_SPEED_20000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = ICE_LINK_SPEED_25000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- speed = ICE_LINK_SPEED_40000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- speed = ICE_LINK_SPEED_50000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100GB:
- speed = ICE_LINK_SPEED_100000MBPS;
- break;
- default:
- speed = ICE_LINK_SPEED_UNKNOWN;
- break;
- }
- else
+ if (adv_link_support) {
+ /* convert a BIT() value into an array index */
+ speed = ice_get_link_speed(fls(link_speed) - 1);
+ } else {
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- case ICE_AQ_LINK_SPEED_100MB:
- speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- case ICE_AQ_LINK_SPEED_2500MB:
- case ICE_AQ_LINK_SPEED_5GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- case ICE_AQ_LINK_SPEED_50GB:
- case ICE_AQ_LINK_SPEED_100GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
- break;
- default:
- speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
- break;
- }
+ speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
+ }
return speed;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 2b4c791b6cba..dab3cd5d300e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -462,6 +462,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
@@ -1658,6 +1661,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ u32 rxdid;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
@@ -1685,6 +1689,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id, i);
goto error_param;
}
+
+ /* If Rx flex desc is supported, select RXDID for Rx
+ * queues. Otherwise, use legacy 32byte descriptor
+ * format. Legacy 16byte descriptor is not supported.
+ * If this RXDID is selected, return error.
+ */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx],
+ rxdid, 0x03, false);
}
}
@@ -2457,6 +2479,164 @@ error_param:
}
/**
+ * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
+ * @vf: pointer to the VF info
+ */
+static int ice_vc_get_rss_hena(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_hena *vrh = NULL;
+ int len = 0, ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_rss_hena);
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vrh->hena = ICE_DEFAULT_RSS_HENA;
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
+ (u8 *)vrh, len);
+ kfree(vrh);
+ return ret;
+}
+
+/**
+ * ice_vc_set_rss_hena - set RSS HENA bits for the VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ */
+static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ dev_err(dev, "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ /* clear all previously programmed RSS configuration to allow VF drivers
+ * the ability to customize the RSS configuration and/or completely
+ * disable RSS
+ */
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status && !vrh->hena) {
+ /* only report failure to clear the current RSS configuration if
+ * that was clearly the VF's intention (i.e. vrh->hena = 0)
+ */
+ v_ret = ice_err_to_virt_err(status);
+ goto err;
+ } else if (status) {
+ /* allow the VF to update the RSS configuration even on failure
+ * to clear the current RSS configuration in an attempt to keep
+ * RSS in a working state
+ */
+ dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
+ vf->vf_id);
+ }
+
+ if (vrh->hena) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena);
+ v_ret = ice_err_to_virt_err(status);
+ }
+
+ /* send the response to the VF */
+err:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_query_rxdid - query RXDID supported by DDP package
+ * @vf: pointer to VF info
+ *
+ * Called from VF to query a bitmap of supported flexible
+ * descriptor RXDIDs of a DDP package.
+ */
+static int ice_vc_query_rxdid(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_supported_rxdids *rxdid = NULL;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_pf *pf = vf->pf;
+ int len = 0;
+ int ret, i;
+ u32 regval;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_supported_rxdids);
+ rxdid = kzalloc(len, GFP_KERNEL);
+ if (!rxdid) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ /* Read flexiflag registers to determine whether the
+ * corresponding RXDID is configured and supported or not.
+ * Since Legacy 16byte descriptor format is not supported,
+ * start from Legacy 32byte descriptor.
+ */
+ for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+ if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+ & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+ rxdid->supported_rxdids |= BIT(i);
+ }
+
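+ /* cache the bitmap; ice_vc_cfg_qs_msg() checks it when a VF selects an RXDID */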
+ pf->supported_rxdids = rxdid->supported_rxdids;
+
+err:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ v_ret, (u8 *)rxdid, len);
+ kfree(rxdid);
+ return ret;
+}
+
+/**
* ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
* @vf: VF to enable/disable VLAN stripping for on initialization
*
@@ -3490,6 +3670,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .query_rxdid = ice_vc_query_rxdid,
+ .get_rss_hena = ice_vc_get_rss_hena,
+ .set_rss_hena_msg = ice_vc_set_rss_hena,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -3624,6 +3807,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .query_rxdid = ice_vc_query_rxdid,
+ .get_rss_hena = ice_vc_get_rss_hena,
+ .set_rss_hena_msg = ice_vc_set_rss_hena,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -3764,6 +3950,15 @@ error_handler:
case VIRTCHNL_OP_DEL_VLAN:
err = ops->remove_vlan_msg(vf, msg);
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ err = ops->query_rxdid(vf);
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ err = ops->get_rss_hena(vf);
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ err = ops->set_rss_hena_msg(vf, msg);
+ break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
err = ops->ena_vlan_stripping(vf);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index b5a3fd8adbb4..b454654d7b0c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -17,6 +17,7 @@
* broadcast, and 16 for additional unicast/multicast filters
*/
#define ICE_MAX_MACADDR_PER_VF 18
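+/* number of RXDID profiles scanned via GLFLXP_RXDID_FLAGS */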
+#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
@@ -35,6 +36,9 @@ struct ice_virtchnl_ops {
int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*query_rxdid)(struct ice_vf *vf);
+ int (*get_rss_hena)(struct ice_vf *vf);
+ int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_stripping)(struct ice_vf *vf);
int (*dis_vlan_stripping)(struct ice_vf *vf);
int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 5a82216e7d03..7d547fa616fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -70,6 +70,11 @@ static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
};
+/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
+static const u32 rx_flex_desc_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+};
+
/* VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF */
static const u32 adv_rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_RSS_CFG, VIRTCHNL_OP_DEL_RSS_CFG,
@@ -96,6 +101,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, req_queues_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN, vlan_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, rx_flex_desc_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 056c904b83cc..907055b77af0 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -24,13 +24,24 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
*/
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
- memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
- sizeof(vsi->rx_rings[q_idx]->rx_stats));
- memset(&vsi->tx_rings[q_idx]->stats, 0,
- sizeof(vsi->tx_rings[q_idx]->stats));
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
if (ice_is_xdp_ena_vsi(vsi))
- memset(&vsi->xdp_rings[q_idx]->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->stats));
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}
/**
@@ -722,7 +733,7 @@ construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
- rx_ring->rx_stats.alloc_buf_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index ff911af16a4b..7d60da1b7bf4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2313,15 +2313,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
ring = adapter->tx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
data[i] = ring->tx_stats.packets;
data[i+1] = ring->tx_stats.bytes;
data[i+2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ start = u64_stats_fetch_begin(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
data[i+2] += restart2;
i += IGB_TX_QUEUE_STATS_LEN;
@@ -2329,13 +2329,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
data[i] = ring->rx_stats.packets;
data[i+1] = ring->rx_stats.bytes;
data[i+2] = ring->rx_stats.drops;
data[i+3] = ring->rx_stats.csum_err;
data[i+4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
spin_unlock(&adapter->stats64_lock);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f8e32833226c..97290fc0fddd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1195,15 +1195,19 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
ring_count = txr_count + rxr_count;
- size = struct_size(q_vector, ring, ring_count);
+ size = kmalloc_size_roundup(struct_size(q_vector, ring, ring_count));
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
if (!q_vector) {
q_vector = kzalloc(size, GFP_KERNEL);
} else if (size > ksize(q_vector)) {
- kfree_rcu(q_vector, rcu);
- q_vector = kzalloc(size, GFP_KERNEL);
+ struct igb_q_vector *new_q_vector;
+
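+ /* free the old q_vector only if the larger replacement was allocated */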
+ new_q_vector = kzalloc(size, GFP_KERNEL);
+ if (new_q_vector)
+ kfree_rcu(q_vector, rcu);
+ q_vector = new_q_vector;
} else {
memset(q_vector, 0, size);
}
@@ -6632,10 +6636,10 @@ void igb_update_stats(struct igb_adapter *adapter)
}
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
_bytes = ring->rx_stats.bytes;
_packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -6648,10 +6652,10 @@ void igb_update_stats(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
_bytes = ring->tx_stats.bytes;
_packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
bytes += _bytes;
packets += _packets;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 15e57460e19e..6f471b91f562 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -195,23 +195,9 @@ static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm)
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
struct e1000_hw *hw = &igb->hw;
- int neg_adj = 0;
- u64 rate;
- u32 incvalue;
-
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
-
- incvalue = INCVALUE_82576;
- rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm,
- 1000000ULL << 16);
+ u64 incvalue;
- if (neg_adj)
- incvalue -= rate;
- else
- incvalue += rate;
+ incvalue = adjust_by_scaled_ppm(INCVALUE_82576, scaled_ppm);
wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 4f9d7f013a95..f7311aeb293b 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -400,6 +400,15 @@
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+/* Transmit Scheduling Latency */
+/* Latency between transmission scheduling (LaunchTime) and the time
+ * the packet is transmitted to the network in nanosecond.
+ */
+#define IGC_TXOFFSET_SPEED_10 0x000034BC
+#define IGC_TXOFFSET_SPEED_100 0x00000578
+#define IGC_TXOFFSET_SPEED_1000 0x0000012C
+#define IGC_TXOFFSET_SPEED_2500 0x00000578
+
/* Time Sync Interrupt Causes */
#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 8cc077b712ad..5a26a7805ef8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -839,15 +839,15 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
ring = adapter->tx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
data[i] = ring->tx_stats.packets;
data[i + 1] = ring->tx_stats.bytes;
data[i + 2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ start = u64_stats_fetch_begin(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
data[i + 2] += restart2;
i += IGC_TX_QUEUE_STATS_LEN;
@@ -855,13 +855,13 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
data[i] = ring->rx_stats.packets;
data[i + 1] = ring->rx_stats.bytes;
data[i + 2] = ring->rx_stats.drops;
data[i + 3] = ring->rx_stats.csum_err;
data[i + 4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
i += IGC_RX_QUEUE_STATS_LEN;
}
spin_unlock(&adapter->stats64_lock);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 34889be63e78..1586e1e435c6 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -4682,10 +4682,10 @@ void igc_update_stats(struct igc_adapter *adapter)
}
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
_bytes = ring->rx_stats.bytes;
_packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -4699,10 +4699,10 @@ void igc_update_stats(struct igc_adapter *adapter)
struct igc_ring *ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
_bytes = ring->tx_stats.bytes;
_packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -5381,6 +5381,13 @@ static void igc_watchdog_task(struct work_struct *work)
break;
}
+ /* Once the launch time has been set on the wire, there
+ * is a delay before the link speed can be determined
+ * based on link-up activity. Write into the register
+ * as soon as we know the correct link speed.
+ */
+ igc_tsn_adjust_txtime_offset(adapter);
+
if (adapter->link_speed != SPEED_1000)
goto no_wait;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index c0d8214148d1..01c86d36856d 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -224,6 +224,7 @@
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
+#define IGC_GTXOFFSET 0x3310
#define IGC_BASET_L 0x3314
#define IGC_BASET_H 0x3318
#define IGC_QBVCYCLET 0x331C
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 0fce22de2ab8..f975ed807da1 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -48,6 +48,35 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
return new_flags;
}
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u16 txoffset;
+
+ if (!is_any_launchtime(adapter))
+ return;
+
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ txoffset = IGC_TXOFFSET_SPEED_10;
+ break;
+ case SPEED_100:
+ txoffset = IGC_TXOFFSET_SPEED_100;
+ break;
+ case SPEED_1000:
+ txoffset = IGC_TXOFFSET_SPEED_1000;
+ break;
+ case SPEED_2500:
+ txoffset = IGC_TXOFFSET_SPEED_2500;
+ break;
+ default:
+ txoffset = 0;
+ break;
+ }
+
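+ /* program the per-speed LaunchTime-to-wire latency compensation */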
+ wr32(IGC_GTXOFFSET, txoffset);
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -57,6 +86,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
u32 tqavctrl;
int i;
+ wr32(IGC_GTXOFFSET, 0);
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index 1512307f5a52..b53e6af560b7 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -6,5 +6,6 @@
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
#endif /* _IGC_BASE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5369a97ff5ec..bc68b8f2176d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -39,7 +39,10 @@
/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD 512
#define IXGBE_DEFAULT_TX_WORK 256
-#define IXGBE_MAX_TXD 4096
+#define IXGBE_MAX_TXD_82598 4096
+#define IXGBE_MAX_TXD_82599 8192
+#define IXGBE_MAX_TXD_X540 8192
+#define IXGBE_MAX_TXD_X550 32768
#define IXGBE_MIN_TXD 64
#if (PAGE_SIZE < 8192)
@@ -47,7 +50,10 @@
#else
#define IXGBE_DEFAULT_RXD 128
#endif
-#define IXGBE_MAX_RXD 4096
+#define IXGBE_MAX_RXD_82598 4096
+#define IXGBE_MAX_RXD_82599 8192
+#define IXGBE_MAX_RXD_X540 8192
+#define IXGBE_MAX_RXD_X550 32768
#define IXGBE_MIN_RXD 64
/* flow control */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e88e3dfac8c2..6cfc9dc16537 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1117,6 +1117,42 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
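+/* the largest supported descriptor ring size depends on the MAC type */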
+static u32 ixgbe_get_max_rxd(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ return IXGBE_MAX_RXD_82598;
+ case ixgbe_mac_82599EB:
+ return IXGBE_MAX_RXD_82599;
+ case ixgbe_mac_X540:
+ return IXGBE_MAX_RXD_X540;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ return IXGBE_MAX_RXD_X550;
+ default:
+ return IXGBE_MAX_RXD_82598;
+ }
+}
+
+static u32 ixgbe_get_max_txd(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ return IXGBE_MAX_TXD_82598;
+ case ixgbe_mac_82599EB:
+ return IXGBE_MAX_TXD_82599;
+ case ixgbe_mac_X540:
+ return IXGBE_MAX_TXD_X540;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ return IXGBE_MAX_TXD_X550;
+ default:
+ return IXGBE_MAX_TXD_82598;
+ }
+}
+
static void ixgbe_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -1126,8 +1162,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
- ring->rx_max_pending = IXGBE_MAX_RXD;
- ring->tx_max_pending = IXGBE_MAX_TXD;
+ ring->rx_max_pending = ixgbe_get_max_rxd(adapter);
+ ring->tx_max_pending = ixgbe_get_max_txd(adapter);
ring->rx_pending = rx_ring->count;
ring->tx_pending = tx_ring->count;
}
@@ -1146,11 +1182,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
return -EINVAL;
new_tx_count = clamp_t(u32, ring->tx_pending,
- IXGBE_MIN_TXD, IXGBE_MAX_TXD);
+ IXGBE_MIN_TXD, ixgbe_get_max_txd(adapter));
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
new_rx_count = clamp_t(u32, ring->rx_pending,
- IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+ IXGBE_MIN_RXD, ixgbe_get_max_rxd(adapter));
new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring_count) &&
@@ -1335,10 +1371,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
@@ -1351,10 +1387,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
@@ -1960,18 +1996,13 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
unsigned int frame_size)
{
unsigned char *data;
- bool match = true;
frame_size >>= 1;
data = page_address(rx_buffer->page) + rx_buffer->page_offset;
- if (data[3] != 0xFF ||
- data[frame_size + 10] != 0xBE ||
- data[frame_size + 12] != 0xAF)
- match = false;
-
- return match;
+ return data[3] == 0xFF && data[frame_size + 10] == 0xBE &&
+ data[frame_size + 12] == 0xAF;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 298cfbfcb7b6..ab8370c413f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9041,10 +9041,10 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
if (ring) {
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
}
@@ -9064,10 +9064,10 @@ static void ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index f8605f57bd06..0310af851086 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -451,21 +451,11 @@ static int ixgbe_ptp_adjfine_82599(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
- u64 incval, diff;
- int neg_adj = 0;
-
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
+ u64 incval;
smp_mb();
incval = READ_ONCE(adapter->base_incval);
-
- diff = mul_u64_u64_div_u64(incval, scaled_ppm,
- 1000000ULL << 16);
-
- incval = neg_adj ? (incval - diff) : (incval + diff);
+ incval = adjust_by_scaled_ppm(incval, scaled_ppm);
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -502,17 +492,11 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
- int neg_adj = 0;
+ bool neg_adj;
u64 rate;
u32 inca;
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
-
- rate = mul_u64_u64_div_u64(IXGBE_X550_BASE_PERIOD, scaled_ppm,
- 1000000ULL << 16);
+ neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
@@ -1318,7 +1302,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
default:
/* Other devices aren't supported */
return;
- };
+ }
IXGBE_WRITE_FLUSH(hw);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index ccfa6b91aac6..296915414a7c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -458,10 +458,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
@@ -475,10 +475,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
@@ -492,10 +492,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e338fa572793..ea0a230c1153 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2044,12 +2044,16 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
spin_unlock_bh(&adapter->mbx_lock);
- /* translate error return types so error makes sense */
- if (err == IXGBE_ERR_MBX)
- return -EIO;
+ if (err) {
+ netdev_err(netdev, "VF could not set VLAN %d\n", vid);
+
+ /* translate error return types so error makes sense */
+ if (err == IXGBE_ERR_MBX)
+ return -EIO;
- if (err == IXGBE_ERR_INVALID_ARGUMENT)
- return -EACCES;
+ if (err == IXGBE_ERR_INVALID_ARGUMENT)
+ return -EACCES;
+ }
set_bit(vid, adapter->active_vlans);
@@ -2070,6 +2074,9 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
spin_unlock_bh(&adapter->mbx_lock);
+ if (err)
+ netdev_err(netdev, "Could not remove VLAN %d\n", vid);
+
clear_bit(vid, adapter->active_vlans);
return err;
@@ -4350,10 +4357,10 @@ static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
if (ring) {
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
@@ -4376,10 +4383,10 @@ static void ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 8941f69d93e9..3b129a1c3381 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -108,6 +108,7 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define TXQ_COMMAND 0x0048
#define TXQ_FIX_PRIO_CONF 0x004c
#define PORT_SERIAL_CONTROL1 0x004c
+#define RGMII_EN 0x00000008
#define CLK125_BYPASS_EN 0x00000010
#define TX_BW_RATE 0x0050
#define TX_BW_MTU 0x0058
@@ -2762,6 +2763,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
+ of_get_phy_mode(pnp, &ppd.interface);
+
ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
if (!ppd.phy_node) {
ppd.phy_addr = MV643XX_ETH_PHY_NONE;
@@ -3093,6 +3096,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct mv643xx_eth_private *mp;
struct net_device *dev;
struct phy_device *phydev = NULL;
+ u32 psc1r;
int err, irq;
pd = dev_get_platdata(&pdev->dev);
@@ -3120,14 +3124,45 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
- /* Kirkwood resets some registers on gated clocks. Especially
- * CLK125_BYPASS_EN must be cleared but is not available on
- * all other SoCs/System Controllers using this driver.
- */
if (of_device_is_compatible(pdev->dev.of_node,
- "marvell,kirkwood-eth-port"))
- wrlp(mp, PORT_SERIAL_CONTROL1,
- rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
+ "marvell,kirkwood-eth-port")) {
+ psc1r = rdlp(mp, PORT_SERIAL_CONTROL1);
+
+ /* Kirkwood resets some registers on gated clocks. Especially
+ * CLK125_BYPASS_EN must be cleared but is not available on
+ * all other SoCs/System Controllers using this driver.
+ */
+ psc1r &= ~CLK125_BYPASS_EN;
+
+ /* On Kirkwood with two Ethernet controllers, if both of them
+ * have RGMII_EN disabled, the first controller will be in GMII
+ * mode and the second one is effectively disabled, instead of
+ * two MII interfaces.
+ *
+ * To enable GMII in the first controller, the second one must
+ * also be configured (and may be enabled) with RGMII_EN
+ * disabled too, even though it cannot be used at all.
+ */
+ switch (pd->interface) {
+ /* Use internal to denote second controller being disabled */
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ psc1r &= ~RGMII_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ psc1r |= RGMII_EN;
+ break;
+ default:
+ /* Unknown; don't touch */
+ break;
+ }
+
+ wrlp(mp, PORT_SERIAL_CONTROL1, psc1r);
+ }
/*
* Start with a default rate, and if there is a clock, allow
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5aefaaff0871..f8925cac61e4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -813,14 +813,14 @@ mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
rx_packets = cpu_stats->es.ps.rx_packets;
rx_bytes = cpu_stats->es.ps.rx_bytes;
rx_dropped = cpu_stats->rx_dropped;
rx_errors = cpu_stats->rx_errors;
tx_packets = cpu_stats->es.ps.tx_packets;
tx_bytes = cpu_stats->es.ps.tx_bytes;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -4228,7 +4228,6 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = mvneta_mac_select_pcs,
.mac_prepare = mvneta_mac_prepare,
.mac_config = mvneta_mac_config,
@@ -4266,7 +4265,7 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
*/
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
- int elected_cpu = 0, max_cpu, cpu, i = 0;
+ int elected_cpu = 0, max_cpu, cpu;
/* Use the cpu associated to the rxq when it is online, in all
* the other cases, use the cpu 0 which can't be offline.
@@ -4306,8 +4305,6 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
*/
smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
pp, true);
- i++;
-
}
};
@@ -4762,7 +4759,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
stats = per_cpu_ptr(pp->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
skb_alloc_error = stats->es.skb_alloc_error;
refill_error = stats->es.refill_error;
xdp_redirect = stats->es.ps.xdp_redirect;
@@ -4772,7 +4769,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
xdp_xmit_err = stats->es.ps.xdp_xmit_err;
xdp_tx = stats->es.ps.xdp_tx;
xdp_tx_err = stats->es.ps.xdp_tx_err;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
es->skb_alloc_error += skb_alloc_error;
es->refill_error += refill_error;
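The mvneta hunks above only swap the u64_stats_fetch_begin_irq()/_retry_irq() helpers for the plain variants; the read loop itself is unchanged. For readers unfamiliar with the pattern, here is a toy, single-threaded model of the begin/retry protocol those helpers implement. This is an assumed simplification for illustration only; the real helpers live in <linux/u64_stats_sync.h> and behave differently on 32-bit and 64-bit kernels.

/* Toy seqcount-style reader: loop until the writer was idle and the
 * sequence did not change across the reads.
 */
#include <stdio.h>
#include <stdint.h>

struct stats {
	unsigned int seq;	/* even: stable, odd: writer in progress */
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

static unsigned int fetch_begin(const struct stats *s)
{
	return s->seq;
}

static int fetch_retry(const struct stats *s, unsigned int start)
{
	/* retry if the writer was active or has run since fetch_begin() */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct stats s = { .seq = 2, .rx_packets = 10, .rx_bytes = 1500 };
	uint64_t pkts, bytes;
	unsigned int start;

	do {
		start = fetch_begin(&s);
		pkts = s.rx_packets;
		bytes = s.rx_bytes;
	} while (fetch_retry(&s, start));

	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}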
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index b399bdb1ca36..4da45c5abba5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2008,7 +2008,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
cpu_stats = per_cpu_ptr(port->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
xdp_redirect = cpu_stats->xdp_redirect;
xdp_pass = cpu_stats->xdp_pass;
xdp_drop = cpu_stats->xdp_drop;
@@ -2016,7 +2016,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
xdp_xmit_err = cpu_stats->xdp_xmit_err;
xdp_tx = cpu_stats->xdp_tx;
xdp_tx_err = cpu_stats->xdp_tx_err;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
xdp_stats->xdp_redirect += xdp_redirect;
xdp_stats->xdp_pass += xdp_pass;
@@ -5115,12 +5115,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
cpu_stats = per_cpu_ptr(port->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -6104,6 +6104,13 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
}
}
+ /* Only valid on OF enabled platforms */
+ if (!of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr)) {
+ *mac_from = "nvmem cell";
+ eth_hw_addr_set(dev, fw_mac_addr);
+ return;
+ }
+
*mac_from = "random";
eth_hw_addr_random(dev);
}
@@ -6603,7 +6610,6 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops mvpp2_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index b45dd7f04e21..5a898fb88e37 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -23,6 +23,7 @@ struct workqueue_struct *octep_wq;
/* Supported Devices */
static const struct pci_device_id octep_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
{0, },
};
MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
@@ -905,6 +906,18 @@ static void octep_ctrl_mbox_task(struct work_struct *work)
}
}
+static const char *octep_devid_to_str(struct octep_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
+ return "CNF95N";
+ default:
+ return "Unsupported";
+ }
+}
+
/**
* octep_device_setup() - Setup Octeon Device.
*
@@ -937,9 +950,10 @@ int octep_device_setup(struct octep_device *oct)
switch (oct->chip_id) {
case OCTEP_PCI_DEVICE_ID_CN93_PF:
- dev_info(&pdev->dev,
- "Setting up OCTEON CN93XX PF PASS%d.%d\n",
- OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
+ octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
+ OCTEP_MINOR_REV(oct));
octep_device_setup_cn93_pf(oct);
break;
default:
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index 025626a61383..123ffc13754d 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -21,6 +21,8 @@
#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+#define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 //95N PF
+
#define OCTEP_MAX_QUEUES 63
#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index c8724bfa86b0..b2b71fe80d61 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -64,6 +64,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
{ 0, } /* end of table */
};
@@ -73,12 +74,13 @@ static bool is_dev_rpm(void *cgxd)
{
struct cgx *cgx = cgxd;
- return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
+ (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}
bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
- if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
+ if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
return false;
return test_bit(lmac_id, &cgx->lmac_bmap);
}
@@ -90,7 +92,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
int tmp, id = 0;
- for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
if (tmp == lmac_id)
break;
id++;
@@ -121,7 +123,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
- if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+ if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
return NULL;
return cgx->lmac_idmap[lmac_id];
@@ -485,7 +487,7 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
- cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+ cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
return 0;
}
@@ -740,6 +742,10 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
+
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+ return 0;
+
fec_stats_count =
cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
@@ -1224,7 +1230,7 @@ static inline void link_status_user_format(u64 lstat,
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
- linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+ linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
@@ -1395,7 +1401,7 @@ int cgx_get_fwdata_base(u64 *base)
if (!cgx)
return -ENXIO;
- first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+ first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
@@ -1484,7 +1490,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
- int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
@@ -1522,7 +1528,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
int i, err;
/* Do Link up for all the enabled lmacs */
- for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -1542,14 +1548,6 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
-static void cgx_lmac_get_fifolen(struct cgx *cgx)
-{
- u64 cfg;
-
- cfg = cgx_read(cgx, 0, CGX_CONST);
- cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
-}
-
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
int cnt, bool req_free)
{
@@ -1604,17 +1602,20 @@ static int cgx_lmac_init(struct cgx *cgx)
u64 lmac_list;
int i, err;
- cgx_lmac_get_fifolen(cgx);
-
- cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
*/
- if (cgx->mac_ops->non_contiguous_serdes_lane)
- lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ if (is_dev_rpm2(cgx))
+ lmac_list =
+ cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
+ else
+ lmac_list =
+ cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+ }
- if (cgx->lmac_count > MAX_LMAC_PER_CGX)
- cgx->lmac_count = MAX_LMAC_PER_CGX;
+ if (cgx->lmac_count > cgx->max_lmac_per_mac)
+ cgx->lmac_count = cgx->max_lmac_per_mac;
for (i = 0; i < cgx->lmac_count; i++) {
lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
@@ -1635,7 +1636,9 @@ static int cgx_lmac_init(struct cgx *cgx)
lmac->cgx = cgx;
lmac->mac_to_index_bmap.max =
- MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ cgx->mac_ops->dmac_filter_count /
+ cgx->lmac_count;
+
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
goto err_name_free;
@@ -1692,7 +1695,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
- for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
@@ -1708,6 +1711,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+ cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+
if (is_dev_rpm(cgx))
cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
@@ -1716,6 +1725,15 @@ static void cgx_populate_features(struct cgx *cgx)
RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
+static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
+{
+ if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
+ is_dev_rpm2(cgx))
+ return 0x80;
+ else
+ return 0x60;
+}
+
static struct mac_ops cgx_mac_ops = {
.name = "cgx",
.csr_offset = 0,
@@ -1728,12 +1746,14 @@ static struct mac_ops cgx_mac_ops = {
.non_contiguous_serdes_lane = false,
.rx_stats_cnt = 9,
.tx_stats_cnt = 18,
+ .dmac_filter_count = 32,
.get_nr_lmacs = cgx_get_nr_lmacs,
.get_lmac_type = cgx_get_lmac_type,
.lmac_fifo_len = cgx_get_lmac_fifo_len,
.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
.mac_get_rx_stats = cgx_get_rx_stats,
.mac_get_tx_stats = cgx_get_tx_stats,
+ .get_fec_stats = cgx_get_fec_stats,
.mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
@@ -1759,11 +1779,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, cgx);
/* Use mac_ops to get MAC specific features */
- if (pdev->device == PCI_DEVID_CN10K_RPM)
- cgx->mac_ops = rpm_get_mac_ops();
+ if (is_dev_rpm(cgx))
+ cgx->mac_ops = rpm_get_mac_ops(cgx);
else
cgx->mac_ops = &cgx_mac_ops;
+ cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 0b06788b8d80..fb2d37676d84 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -18,11 +18,7 @@
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
-#define CGX_ID_MASK 0x7
-#define MAX_LMAC_PER_CGX 4
-#define MAX_DMAC_ENTRIES_PER_CGX 32
-#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
-#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
+#define CGX_ID_MASK 0xF
/* Registers */
#define CGXX_CMRX_CFG 0x00
@@ -56,7 +52,8 @@
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
-#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(55, 32)
+#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24)
#define CGXX_SPUX_CONTROL1 0x10000
#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
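With the new CGX_CONST layout above, both the RX FIFO size and the LMAC count are read out of a single register. A standalone sketch of the extraction follows; the bit ranges are taken from the hunk, while the sample register value (128 KB FIFO, 8 LMACs) and local re-creations of GENMASK_ULL()/FIELD_GET() are only for illustration.

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define CGX_CONST_RXFIFO_SIZE	GENMASK_ULL(55, 32)
#define CGX_CONST_MAX_LMACS	GENMASK_ULL(31, 24)

/* minimal FIELD_GET: mask, then shift the value down to bit 0 */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	/* hypothetical CGX_CONST readout: 128 KB FIFO, 8 LMACs */
	uint64_t cgx_const = (0x20000ULL << 32) | (8ULL << 24);

	printf("fifo_len = 0x%llx, max_lmac_per_mac = %llu\n",
	       (unsigned long long)field_get(CGX_CONST_RXFIFO_SIZE, cgx_const),
	       (unsigned long long)field_get(CGX_CONST_MAX_LMACS, cgx_const));
	return 0;
}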
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 52b6016789fa..39aaf0e4467d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -75,6 +75,11 @@ struct mac_ops {
/* RPM & CGX differs in number of Receive/transmit stats */
u8 rx_stats_cnt;
u8 tx_stats_cnt;
+ /* Unlike CN10K, which shares the same CSR offset with CGX,
+ * CNF10KB has a different CSR offset
+ */
+ u64 rxid_map_offset;
+ u8 dmac_filter_count;
/* Incase of RPM get number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST]
* number of setbits in lmac_exist tells number of lmacs
*/
@@ -121,6 +126,9 @@ struct mac_ops {
int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
+ /* FEC stats */
+ int (*get_fec_stats)(void *cgxd, int lmac_id,
+ struct cgx_fec_stats_rsp *rsp);
};
struct cgx {
@@ -128,7 +136,10 @@ struct cgx {
struct pci_dev *pdev;
u8 cgx_id;
u8 lmac_count;
- struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ /* number of LMACs per MAC could be 4 or 8 */
+ u8 max_lmac_per_mac;
+#define MAX_LMAC_COUNT 8
+ struct lmac *lmac_idmap[MAX_LMAC_COUNT];
struct work_struct cgx_cmd_work;
struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;
@@ -150,6 +161,6 @@ struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
bool is_lmac_valid(struct cgx *cgx, int lmac_id);
-struct mac_ops *rpm_get_mac_ops(void);
+struct mac_ops *rpm_get_mac_ops(struct cgx *cgx);
#endif /* LMAC_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 8d5d5a0f68c4..d2584ebb7a70 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -245,6 +245,9 @@ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \
npc_get_secret_key_req, \
npc_get_secret_key_rsp) \
+M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status, \
+ npc_get_field_status_req, \
+ npc_get_field_status_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -1437,6 +1440,10 @@ struct flow_msg {
u8 tc;
__be16 sport;
__be16 dport;
+ union {
+ u8 ip_flag;
+ u8 next_header;
+ };
};
struct npc_install_flow_req {
@@ -1541,6 +1548,17 @@ struct ptp_rsp {
u64 clk;
};
+struct npc_get_field_status_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+ u8 field;
+};
+
+struct npc_get_field_status_rsp {
+ struct mbox_msghdr hdr;
+ u8 enable;
+};
+
struct set_vf_perm {
struct mbox_msghdr hdr;
u16 vf;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index f187293e3e08..9beeead56d7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -185,8 +185,10 @@ enum key_fields {
NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
NPC_OUTER_VID,
NPC_TOS,
+ NPC_IPFRAG_IPV4,
NPC_SIP_IPV4,
NPC_DIP_IPV4,
+ NPC_IPFRAG_IPV6,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
NPC_IPPROTO_TCP,
@@ -620,6 +622,7 @@ struct rvu_npc_mcam_rule {
bool vfvlan_cfg;
u16 chan;
u16 chan_mask;
+ u8 lxmb;
};
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index a70e1153fa04..de0d88dd10d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -8,7 +8,7 @@
#include "cgx.h"
#include "lmac_common.h"
-static struct mac_ops rpm_mac_ops = {
+static struct mac_ops rpm_mac_ops = {
.name = "rpm",
.csr_offset = 0x4e00,
.lmac_offset = 20,
@@ -20,12 +20,14 @@ static struct mac_ops rpm_mac_ops = {
.non_contiguous_serdes_lane = true,
.rx_stats_cnt = 43,
.tx_stats_cnt = 34,
+ .dmac_filter_count = 32,
.get_nr_lmacs = rpm_get_nr_lmacs,
.get_lmac_type = rpm_get_lmac_type,
.lmac_fifo_len = rpm_get_lmac_fifo_len,
.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
.mac_get_rx_stats = rpm_get_rx_stats,
.mac_get_tx_stats = rpm_get_tx_stats,
+ .get_fec_stats = rpm_get_fec_stats,
.mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
@@ -37,9 +39,50 @@ static struct mac_ops rpm_mac_ops = {
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
-struct mac_ops *rpm_get_mac_ops(void)
+static struct mac_ops rpm2_mac_ops = {
+ .name = "rpm",
+ .csr_offset = RPM2_CSR_OFFSET,
+ .lmac_offset = 20,
+ .int_register = RPM2_CMRX_SW_INT,
+ .int_set_reg = RPM2_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .dmac_filter_count = 64,
+ .get_nr_lmacs = rpm2_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm2_get_lmac_fifo_len,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .get_fec_stats = rpm_get_fec_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+};
+
+bool is_dev_rpm2(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return (rpm->pdev->device == PCI_DEVID_CN10KB_RPM);
+}
+
+struct mac_ops *rpm_get_mac_ops(rpm_t *rpm)
{
- return &rpm_mac_ops;
+ if (is_dev_rpm2(rpm))
+ return &rpm2_mac_ops;
+ else
+ return &rpm_mac_ops;
}
static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
@@ -52,6 +95,16 @@ static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
return cgx_read(rpm, lmac, offset);
}
+/* Read HW major version to determine RPM
+ * MAC type 100/USX
+ */
+static bool is_mac_rpmusx(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return rpm_read(rpm, 0, RPMX_CONST1) & 0x700ULL;
+}
+
int rpm_get_nr_lmacs(void *rpmd)
{
rpm_t *rpm = rpmd;
@@ -59,6 +112,13 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
+int rpm2_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL);
+}
+
int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -222,6 +282,46 @@ static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
}
}
+static void rpm2_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, lmac_id, RPM2_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold
+ * for 802.3X frames
+ */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPM2_CMR_RX_OVR_BP_EN;
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPM2_CMR_RX_OVR_BP_EN;
+ cfg &= ~RPM2_CMR_RX_OVR_BP_BP;
+ }
+ rpm_write(rpm, lmac_id, RPM2_CMR_RX_OVR_BP, cfg);
+}
+
+static void rpm_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for
+ * 802.3X frames
+ */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+}
+
int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause)
{
@@ -243,18 +343,11 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
- if (tx_pause) {
- /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
- rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
- cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
- } else {
- /* Disable all Pause Quanta & threshold values */
- rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
- cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
- cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
- }
- rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+ if (is_dev_rpm2(rpm))
+ rpm2_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+ else
+ rpm_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+
return 0;
}
@@ -278,13 +371,16 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Enable channel mask for all LMACS */
+ if (is_dev_rpm2(rpm))
+ rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
+ else
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+
/* Disable all PFC classes */
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
-
- /* Enable channel mask for all LMACS */
- rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -292,7 +388,7 @@ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
rpm_t *rpm = rpmd;
u64 val_lo, val_hi;
- if (!rpm || lmac_id >= rpm->lmac_count)
+ if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
mutex_lock(&rpm->lock);
@@ -320,7 +416,7 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
rpm_t *rpm = rpmd;
u64 val_lo, val_hi;
- if (!rpm || lmac_id >= rpm->lmac_count)
+ if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
mutex_lock(&rpm->lock);
@@ -380,13 +476,71 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
return 0;
}
+static int rpmusx_lmac_internal_loopback(rpm_t *rpm, int lmac_id, bool enable)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1);
+
+ if (enable)
+ cfg |= RPM2_USX_PCS_LBK;
+ else
+ cfg &= ~RPM2_USX_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1, cfg);
+
+ return 0;
+}
+
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ u64 hi_perf_lmac, lmac_info;
+ rpm_t *rpm = rpmd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
+ /* LMACs are divided into two groups and each group
+ * gets half of the FIFO
+ * Group0 lmac_id range {0..3}
+ * Group1 lmac_id range {4..7}
+ */
+ fifo_len = rpm->mac_ops->fifo_len / 2;
+
+ if (lmac_id < 4) {
+ num_lmacs = hweight8(lmac_info & 0xF);
+ hi_perf_lmac = (lmac_info >> 8) & 0x3ULL;
+ } else {
+ num_lmacs = hweight8(lmac_info & 0xF0);
+ hi_perf_lmac = (lmac_info >> 10) & 0x3ULL;
+ hi_perf_lmac += 4;
+ }
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* The LMAC marked as hi_perf gets half of the FIFO
+ * and each remaining LMAC gets a quarter
+ */
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
u8 lmac_type;
u64 cfg;
- if (!rpm || lmac_id >= rpm->lmac_count)
+ if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
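The FIFO split implemented by rpm2_get_lmac_fifo_len() above is easier to see with numbers: the two LMAC groups each own half the FIFO, and within a group of three the hi_perf LMAC takes half of that share while the others take a quarter each. The standalone sketch below reproduces that arithmetic; the register bit positions are copied from the hunk, the example lmac_info value and 256 KB FIFO are made up.

#include <stdio.h>
#include <stdint.h>

static unsigned int popcount8(unsigned int x)
{
	return __builtin_popcount(x & 0xff);
}

static uint32_t rpm2_lmac_fifo_len(uint64_t lmac_info, uint32_t total_fifo,
				   int lmac_id)
{
	uint32_t fifo_len = total_fifo / 2;	/* each group owns half */
	unsigned int num_lmacs, hi_perf_lmac;

	if (lmac_id < 4) {
		num_lmacs = popcount8(lmac_info & 0xF);
		hi_perf_lmac = (lmac_info >> 8) & 0x3;
	} else {
		num_lmacs = popcount8(lmac_info & 0xF0);
		hi_perf_lmac = ((lmac_info >> 10) & 0x3) + 4;
	}

	switch (num_lmacs) {
	case 1:
		return fifo_len;
	case 2:
		return fifo_len / 2;
	case 3:
		return (lmac_id == (int)hi_perf_lmac) ? fifo_len / 2
						      : fifo_len / 4;
	default:
		return fifo_len / 4;
	}
}

int main(void)
{
	/* group 0 has LMACs 0-2 enabled, LMAC 1 marked hi_perf; 256 KB FIFO */
	uint64_t lmac_info = 0x7 | (1ULL << 8);

	for (int id = 0; id < 3; id++)
		printf("lmac %d: %u bytes\n", id,
		       rpm2_lmac_fifo_len(lmac_info, 0x40000, id));
	return 0;
}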
@@ -395,6 +549,9 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
return 0;
}
+ if (is_dev_rpm2(rpm) && is_mac_rpmusx(rpm))
+ return rpmusx_lmac_internal_loopback(rpm, lmac_id, enable);
+
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
if (enable)
@@ -439,8 +596,8 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
{
+ u64 cfg, class_en, pfc_class_mask_cfg;
rpm_t *rpm = rpmd;
- u64 cfg, class_en;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
@@ -476,7 +633,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+
+ rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
return 0;
}
@@ -497,3 +657,59 @@ int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_paus
return 0;
}
+
+int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ u64 val_lo, val_hi;
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+ return 0;
+
+ if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
+ val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_corr_blks = (val_hi << 16 | val_lo);
+
+ val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
+ val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
+
+ /* 50G uses 2 physical serdes lanes */
+ if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
+ LMAC_MODE_50G_R) {
+ val_lo = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_VL1_CCW_LO);
+ val_hi = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_corr_blks += (val_hi << 16 | val_lo);
+
+ val_lo = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_VL1_NCCW_LO);
+ val_hi = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
+ }
+ } else {
+ /* enable RS-FEC capture */
+ cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
+ rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ rsp->fec_corr_blks = (val_hi << 32 | val_lo);
+
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
+ }
+
+ return 0;
+}
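For reference, rpm_get_fec_stats() above stitches split hardware counters back together: BaseR counters are read as a 16-bit high half plus a low half, while the RS-FEC capture registers provide a 32-bit high half. A standalone illustration with invented sample values:

#include <stdio.h>
#include <stdint.h>

static uint64_t fec_baser_blocks(uint64_t lo, uint64_t hi)
{
	return (hi << 16) | lo;		/* FCFECX_CW_HI : VLx_*_LO */
}

static uint64_t fec_rs_blocks(uint64_t lo, uint64_t hi)
{
	return (hi << 32) | lo;		/* STAT_DATA_HI_CDC : CAPTURE_x */
}

int main(void)
{
	/* e.g. CW_HI = 0x2, VL0_CCW_LO = 0x1234 -> 0x21234 corrected blocks */
	printf("baser corr = 0x%llx\n",
	       (unsigned long long)fec_baser_blocks(0x1234, 0x2));
	/* e.g. HI_CDC = 0x1, CAPTURE_2 = 0x89ab -> 0x1000089ab */
	printf("rsfec corr = 0x%llx\n",
	       (unsigned long long)fec_rs_blocks(0x89ab, 0x1));
	return 0;
}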
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 77f2ef9e1425..22147b4c2137 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -12,17 +12,19 @@
/* PCI device IDs */
#define PCI_DEVID_CN10K_RPM 0xA060
+#define PCI_SUBSYS_DEVID_CNF10KB_RPM 0xBC00
+#define PCI_DEVID_CN10KB_RPM 0xA09F
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
+#define RPMX_CMRX_RX_ID_MAP 0x80
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
#define RPMX_CMRX_LINK_CFG 0x1070
#define RPMX_MTI_PCS100X_CONTROL1 0x20000
-#define RPMX_MTI_LPCSX_CONTROL1 0x30000
#define RPMX_MTI_PCS_LBK BIT_ULL(14)
#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
@@ -76,11 +78,40 @@
#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+#define RPMX_CONST1 0x2008
+
+/* FEC stats */
+#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
+#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
+#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620
+#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628
+#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630
+#define RPMX_MTI_FCFECX_CW_HI 0x38638
+
+/* CN10KB CSR Declaration */
+#define RPM2_CMRX_SW_INT 0x1b0
+#define RPM2_CMRX_SW_INT_ENA_W1S 0x1b8
+#define RPM2_CMR_CHAN_MSK_OR 0x3120
+#define RPM2_CMR_RX_OVR_BP_EN BIT_ULL(2)
+#define RPM2_CMR_RX_OVR_BP_BP BIT_ULL(1)
+#define RPM2_CMR_RX_OVR_BP 0x3130
+#define RPM2_CSR_OFFSET 0x3e00
+#define RPM2_CMRX_PRT_CBFC_CTL 0x6510
+#define RPM2_CMRX_RX_LMACS 0x100
+#define RPM2_CMRX_RX_LOGL_XON 0x3100
+#define RPM2_CMRX_RX_STAT2 0x3010
+#define RPM2_USX_PCSX_CONTROL1 0x80000
+#define RPM2_USX_PCS_LBK BIT_ULL(14)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id);
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
@@ -97,4 +128,7 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
u8 *rx_pause);
+int rpm2_get_nr_lmacs(void *rpmd);
+bool is_dev_rpm2(void *rpmd);
+int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 76474385a602..7f0a64731c67 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -410,9 +410,15 @@ struct rvu_fwdata {
u32 ptp_ext_tstamp;
#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
-#define CGX_MAX 5
+#define CGX_MAX 9
#define CGX_LMACS_MAX 4
- struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+#define CGX_LMACS_USX 8
+ union {
+ struct cgx_lmac_fwdata_s
+ cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ struct cgx_lmac_fwdata_s
+ cgx_fw_data_usx[CGX_MAX][CGX_LMACS_USX];
+ };
/* Do not add new fields below this line */
};
@@ -478,7 +484,7 @@ struct rvu {
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
- u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
*/
unsigned long pf_notify_bmap; /* Flags for PF notification */
@@ -851,6 +857,7 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
u64 bcast_mcast_val, u64 bcast_mcast_mask);
void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index addc69f4b65c..438b212fb54a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
return (cgx_features_get(cgxd) & feature);
}
+#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
@@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
if (!pfmap)
return -ENODEV;
else
- return find_first_bit(&pfmap, 16);
+ return find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@@ -129,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!cgx_cnt_max)
return 0;
- if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
- size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
@@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
- rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
- GFP_KERNEL);
+ rvu->cgxlmac2pf_map =
+ devm_kzalloc(rvu->dev,
+ cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
+ GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
@@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!rvu_cgx_pdata(cgx, rvu))
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
- for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
- pfid = find_first_bit(&pfmap, 16);
+ pfid = find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
@@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
- for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
@@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
- for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
cgx_lmac_evh_unregister(cgxd, lmac);
}
@@ -468,6 +472,7 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
int pf = rvu_get_pf(pcifunc);
int i = 0, lmac_count = 0;
+ struct mac_ops *mac_ops;
u8 max_dmac_filters;
u8 cgx_id, lmac_id;
void *cgx_dev;
@@ -483,7 +488,12 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_dev = cgx_get_pdata(cgx_id);
lmac_count = cgx_get_lmac_cnt(cgx_dev);
- max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ mac_ops = get_mac_ops(cgx_dev);
+ if (!mac_ops)
+ return;
+
+ max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;
for (i = 0; i < max_dmac_filters; i++)
cgx_lmac_addr_del(cgx_id, lmac_id, i);
@@ -569,6 +579,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct cgx_fec_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
@@ -577,7 +588,8 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
- return cgx_get_fec_stats(cgxd, lmac, rsp);
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
@@ -1110,8 +1122,15 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
- sizeof(struct cgx_lmac_fwdata_s));
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ memcpy(&rsp->fwdata,
+ &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ else
+ memcpy(&rsp->fwdata,
+ &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+
return 0;
}
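The rvu_cgx.c changes above widen cgxlmac2pf_map so that each (cgx, lmac) slot holds a u64 bitmap of mapped PFs, searched with find_first_bit() over the real table size instead of a fixed 16 bits. A standalone model of that lookup; the table dimensions and PF numbers are hypothetical.

#include <stdio.h>
#include <stdint.h>

#define LMAC_PER_CGX	8
#define CGX_OFFSET(x)	((x) * LMAC_PER_CGX)

static int first_pf(uint64_t pfmap)
{
	return pfmap ? __builtin_ctzll(pfmap) : -1;	/* -ENODEV in the driver */
}

int main(void)
{
	uint64_t cgxlmac2pf_map[2 * LMAC_PER_CGX] = { 0 };

	cgxlmac2pf_map[CGX_OFFSET(1) + 3] = 1ULL << 5;	/* cgx1/lmac3 -> PF 5 */

	printf("pf = %d\n", first_pf(cgxlmac2pf_map[CGX_OFFSET(1) + 3])); /* 5 */
	printf("pf = %d\n", first_pf(cgxlmac2pf_map[CGX_OFFSET(0) + 0])); /* -1 */
	return 0;
}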
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index f66dde2b0f92..fa280ebd3052 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2613,7 +2613,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
- for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
@@ -2759,6 +2759,12 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
seq_printf(s, "\t%s ", npc_get_field_name(bit));
switch (bit) {
+ case NPC_LXMB:
+ if (rule->lxmb == 1)
+ seq_puts(s, "\tL2M nibble is set\n");
+ else
+ seq_puts(s, "\tL2B nibble is set\n");
+ break;
case NPC_DMAC:
seq_printf(s, "%pM ", rule->packet.dmac);
seq_printf(s, "mask %pM\n", rule->mask.dmac);
@@ -2796,6 +2802,14 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%pI6 ", rule->packet.ip6dst);
seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
break;
+ case NPC_IPFRAG_IPV6:
+ seq_printf(s, "0x%x ", rule->packet.next_header);
+ seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
+ break;
+ case NPC_IPFRAG_IPV4:
+ seq_printf(s, "0x%x ", rule->packet.ip_flag);
+ seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
+ break;
case NPC_SPORT_TCP:
case NPC_SPORT_UDP:
case NPC_SPORT_SCTP:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 88dee589cb21..bda1a6fa2ec4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1547,14 +1547,7 @@ static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
return 0;
}
-static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
- struct netlink_ext_ack *extack)
-{
- return devlink_info_driver_name_put(req, DRV_NAME);
-}
-
static const struct devlink_ops rvu_devlink_ops = {
- .info_get = rvu_devlink_info_get,
.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a62c1b322012..6b8747ebc08c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -3197,8 +3197,12 @@ static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
- /* RPM supports FIFO len 128 KB */
- if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+ int fifo_size = rvu_cgx_get_fifolen(rvu);
+
+ /* RPM supports a FIFO len of 128 KB and RPM2 supports double the
+ * FIFO len to accommodate 8 LMACs
+ */
+ if (fifo_size == 0x20000 || fifo_size == 0x40000)
*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
else
*max_mtu = NIC_HW_MAX_FRS;
@@ -4109,7 +4113,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
/* Get LMAC id's from bitmap */
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
- for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
if (!lmac_fifo_len) {
dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 1e348fd0d930..16cfc802e348 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -617,6 +617,12 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
+ /* Ucast rule should not be installed if DMAC
+ * extraction is not supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
+ return;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
@@ -778,6 +784,14 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Bcast rule should not be installed if neither DMAC
+ * nor LXMB extraction is supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
@@ -848,6 +862,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Mcast rule should not be installed if neither DMAC
+ * nor LXMB extraction is supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 7c4e1acd0f77..006beb5cf98d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -26,8 +26,10 @@ static const char * const npc_flow_names[] = {
[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
[NPC_OUTER_VID] = "outer vlan id",
[NPC_TOS] = "tos",
+ [NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
[NPC_SIP_IPV4] = "ipv4 source ip",
[NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_IPFRAG_IPV6] = "fragmented IPv6 header ",
[NPC_SIP_IPV6] = "ipv6 source ip",
[NPC_DIP_IPV6] = "ipv6 destination ip",
[NPC_IPPROTO_TCP] = "ip proto tcp",
@@ -43,9 +45,23 @@ static const char * const npc_flow_names[] = {
[NPC_DPORT_UDP] = "udp destination port",
[NPC_SPORT_SCTP] = "sctp source port",
[NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_LXMB] = "Mcast/Bcast header ",
[NPC_UNKNOWN] = "unknown",
};
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 mcam_features;
+ u64 unsupported;
+
+ mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
+ unsupported = (mcam_features ^ features) & ~mcam_features;
+
+ /* Return false if at least one of the input flows is not extracted */
+ return !unsupported;
+}
+
const char *npc_get_field_name(u8 hdr)
{
if (hdr >= ARRAY_SIZE(npc_flow_names))
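The mask test in npc_is_feature_supported() above flags any requested feature whose bit is set in the request but clear in the MCAM capability mask. A standalone illustration with hypothetical feature bits:

#include <stdio.h>
#include <stdint.h>

#define FEAT_DMAC  (1ULL << 0)
#define FEAT_SIP   (1ULL << 1)
#define FEAT_LXMB  (1ULL << 2)

static int features_supported(uint64_t mcam_features, uint64_t requested)
{
	/* bits requested but not extractable by the key profile */
	uint64_t unsupported = (mcam_features ^ requested) & ~mcam_features;

	return unsupported == 0;
}

int main(void)
{
	uint64_t mcam = FEAT_DMAC | FEAT_LXMB;	/* what the key profile extracts */

	printf("%d\n", features_supported(mcam, FEAT_DMAC));		/* 1 */
	printf("%d\n", features_supported(mcam, FEAT_DMAC | FEAT_SIP));	/* 0 */
	return 0;
}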
@@ -340,8 +356,10 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
/* if key profile programmed does not extract Ethertype at all */
- if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
goto vlan_tci;
+ }
/* if key profile programmed extracts Ethertype from one layer */
if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
@@ -354,35 +372,45 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
/* if key profile programmed extracts Ethertype from multiple layers */
if (etype_ether->nr_kws && etype_tag1->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
goto vlan_tci;
+ }
}
key_fields[NPC_ETYPE] = *etype_tag1;
}
if (etype_ether->nr_kws && etype_tag2->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
goto vlan_tci;
+ }
}
key_fields[NPC_ETYPE] = *etype_tag2;
}
if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
goto vlan_tci;
+ }
}
key_fields[NPC_ETYPE] = *etype_tag2;
}
/* check none of higher layers overwrite Ethertype */
start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
- if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
goto vlan_tci;
+ }
*features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
/* if key profile does not extract outer vlan tci at all */
- if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
goto done;
+ }
/* if key profile extracts outer vlan tci from one layer */
if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
@@ -393,15 +421,19 @@ vlan_tci:
/* if key profile extracts outer vlan tci from multiple layers */
if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n");
goto done;
+ }
}
key_fields[NPC_OUTER_VID] = *vlan_tag2;
}
/* check none of higher layers overwrite outer vlan tci */
start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
- if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
goto done;
+ }
*features |= BIT_ULL(NPC_OUTER_VID);
done:
return;
@@ -419,8 +451,6 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
key = FIELD_GET(NPC_KEY_OFFSET, cfg);
- start_kwi = key / 8;
- offset = (key * 8) % 64;
/* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
* ethernet header.
@@ -435,13 +465,18 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
do { \
+ start_kwi = key / 8; \
+ offset = (key * 8) % 64; \
if (lid == (hlid) && lt == (hlt)) { \
if ((hstart) >= hdr && \
((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ offset += bit_offset; \
+ start_kwi += offset / 64; \
+ offset %= 64; \
npc_set_kw_masks(mcam, (name), (hlen) * 8, \
- start_kwi, offset + bit_offset, intf);\
+ start_kwi, offset, intf); \
} \
} \
} while (0)
@@ -451,8 +486,10 @@ do { \
* Example: Source IP is 4 bytes and starts at 12th byte of IP header
*/
NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+ NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1);
NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
@@ -522,6 +559,10 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
BIT_ULL(NPC_VLAN_ETYPE_STAG);
+
+ /* for L2M/L2B/L3M/L3B, check if the type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
+ *features |= BIT_ULL(NPC_LXMB);
}
/* Scan key extraction profile and record how fields of our interest
@@ -599,16 +640,6 @@ static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
dev_err(rvu->dev, "Channel cannot be overwritten\n");
return -EINVAL;
}
- /* DMAC should be present in key for unicast filter to work */
- if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
- dev_err(rvu->dev, "DMAC not present in Key\n");
- return -EINVAL;
- }
- /* check that none of the fields overwrite DMAC */
- if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
- dev_err(rvu->dev, "DMAC cannot be overwritten\n");
- return -EINVAL;
- }
npc_set_features(rvu, blkaddr, NIX_INTF_TX);
npc_set_features(rvu, blkaddr, NIX_INTF_RX);
@@ -639,9 +670,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
unsupported = (*mcam_features ^ features) & ~(*mcam_features);
if (unsupported) {
- dev_info(rvu->dev, "Unsupported flow(s):\n");
+ dev_warn(rvu->dev, "Unsupported flow(s):\n");
for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
- dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+ dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
return -EOPNOTSUPP;
}
@@ -851,6 +882,11 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_LXMB)) {
+ output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
+ npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
+ output->lxmb, 0, intf);
+ }
#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
do { \
if (features & BIT_ULL((field))) { \
@@ -867,6 +903,8 @@ do { \
NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
ntohs(mask->etype), 0);
NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
+ NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0,
+ mask->ip_flag, 0);
NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
ntohl(mask->ip4src), 0);
NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
@@ -887,6 +925,8 @@ do { \
NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
ntohs(mask->vlan_tci), 0);
+ NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
+ mask->next_header, 0);
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
npc_update_vlan_features(rvu, entry, features, intf);
@@ -991,8 +1031,20 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.match_id = req->match_id;
action.flow_key_alg = req->flow_key_alg;
- if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
- action = pfvf->def_ucast_rule->rx_action;
+ if (req->op == NIX_RX_ACTION_DEFAULT) {
+ if (pfvf->def_ucast_rule) {
+ action = pfvf->def_ucast_rule->rx_action;
+ } else {
+ /* For profiles which do not extract DMAC, the default
+ * unicast entry is unused. Hence modify the action for
+ * requests which use the same action as the default
+ * unicast entry.
+ */
+ *(u64 *)&action = 0;
+ action.pf_func = target;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ }
+ }
entry->action = *(u64 *)&action;
@@ -1153,6 +1205,7 @@ find_rule:
rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
rule->chan &= rule->chan_mask;
+ rule->lxmb = dummy.lxmb;
if (is_npc_intf_tx(req->intf))
rule->intf = pfvf->nix_tx_intf;
else
@@ -1215,6 +1268,35 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_FLOW_INTF_INVALID;
+ /* If DMAC is not extracted in MKEX, rules installed by AF
+ * can rely on L2MB bit set by hardware protocol checker for
+ * broadcast and multicast addresses.
+ */
+ if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
+ goto process_flow;
+
+ if (is_pffunc_af(req->hdr.pcifunc) &&
+ req->features & BIT_ULL(NPC_DMAC)) {
+ if (is_unicast_ether_addr(req->packet.dmac)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support ucast flow\n",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support bcast/mcast flow",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ /* Modify feature to use LXMB instead of DMAC */
+ req->features &= ~BIT_ULL(NPC_DMAC);
+ req->features |= BIT_ULL(NPC_LXMB);
+ }
+
+process_flow:
if (from_vf && req->default_rule)
return NPC_FLOW_VF_PERM_DENIED;
@@ -1558,3 +1640,22 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
return 0;
}
+
+int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
+ struct npc_get_field_status_req *req,
+ struct npc_get_field_status_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ if (npc_check_field(rvu, blkaddr, req->field, req->intf))
+ rsp->enable = 1;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index 594029007f85..f69102d20c90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -490,7 +490,7 @@ static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
if (idx == table->tot_ids) {
mutex_unlock(&table->lock);
dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
- __func__, bitmap_weight(table->id_bmap, table->tot_ids));
+ __func__, table->tot_ids);
return false;
}
@@ -1870,12 +1870,11 @@ int rvu_npc_exact_init(struct rvu *rvu)
/* Set capability to true */
rvu->hw->cap.npc_exact_match_enabled = true;
- table = kmalloc(sizeof(*table), GFP_KERNEL);
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
- memset(table, 0, sizeof(*table));
rvu->hw->table = table;
/* Read table size, ways and depth */
@@ -1899,24 +1898,24 @@ int rvu_npc_exact_init(struct rvu *rvu)
table_size = table->mem_table.depth * table->mem_table.ways;
/* Allocate bitmap for 4way 2K table */
- table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size),
- sizeof(long), GFP_KERNEL);
+ table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
+ GFP_KERNEL);
if (!table->mem_table.bmap)
return -ENOMEM;
dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
/* Allocate bitmap for 32 entry mcam */
- table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL);
+ table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
if (!table->cam_table.bmap)
return -ENOMEM;
dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
- table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth;
- table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids),
- table->tot_ids, GFP_KERNEL);
+ table->tot_ids = table_size + table->cam_table.depth;
+ table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
+ GFP_KERNEL);
if (!table->id_bmap)
return -ENOMEM;
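The conversion above leans on devm_bitmap_zalloc() reserving BITS_TO_LONGS(nbits) longs, which is the same backing store the open-coded devm_kcalloc() calls computed by hand (and, for id_bmap, oversized by passing tot_ids instead of sizeof(long)). A stand-alone sketch of the sizing arithmetic, assuming the 4-way, 2K-deep geometry mentioned in the comments of this hunk:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t table_size = 4 * 2048;		/* 4-way, 2K-deep exact-match table */
	size_t tot_ids = table_size + 32;	/* plus the 32-entry CAM */

	printf("mem bitmap: %zu bytes\n", BITS_TO_LONGS(table_size) * sizeof(long));
	printf("id bitmap:  %zu bytes\n", BITS_TO_LONGS(tot_ids) * sizeof(long));
	return 0;
}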
@@ -1957,7 +1956,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
/* Install SDP drop rule */
drop_mcam_idx = &table->num_drop_rules;
- max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
+ max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
+ PF_CGXMAP_BASE;
+
for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
if (rvu->pf2cgxlmac_map[i] == 0xFF)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 67aa02bb2b85..5bee3c3a7ce4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -28,6 +28,9 @@
#include "otx2_devlink.h"
#include <rvu_trace.h>
+/* IPv4 flag more fragment bit */
+#define IPV4_FLAG_MORE 0x20
+
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
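The 0x20 value is the IPv4 "more fragments" (MF) bit as seen in the first byte of the 16-bit fragment-offset field: IP_MF is 0x2000 over the whole field, so the flags byte alone reads 0x20. A quick sketch of that relationship:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t frag_off = 0x2000;		/* IP_MF set, fragment offset 0 */
	uint8_t flags_byte = frag_off >> 8;	/* first byte of the field */

	printf("flags byte = 0x%02x\n", flags_byte);	/* prints 0x20 */
	return 0;
}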
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 777a27047c8e..63ef7c41d18d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -77,22 +77,7 @@ static const struct devlink_param otx2_dl_params[] = {
otx2_dl_mcam_count_validate),
};
-/* Devlink OPs */
-static int otx2_devlink_info_get(struct devlink *devlink,
- struct devlink_info_req *req,
- struct netlink_ext_ack *extack)
-{
- struct otx2_devlink *otx2_dl = devlink_priv(devlink);
- struct otx2_nic *pfvf = otx2_dl->pfvf;
-
- if (is_otx2_vf(pfvf->pcifunc))
- return devlink_info_driver_name_put(req, "rvu_nicvf");
-
- return devlink_info_driver_name_put(req, "rvu_nicpf");
-}
-
static const struct devlink_ops otx2_devlink_ops = {
- .info_get = otx2_devlink_info_get,
};
int otx2_register_dl(struct otx2_nic *pfvf)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 0eb74e8c553d..0f8d1a69139f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -1268,6 +1268,39 @@ end:
return err;
}
+static void otx2_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ otx2_update_lmac_fec_stats(pfvf);
+
+ /* Report MAC FEC stats */
+ fec_stats->corrected_blocks.total = pfvf->hw.cgx_fec_corr_blks;
+ fec_stats->uncorrectable_blocks.total = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_stats->corrected_blocks.total = p->brfec_corr_blks;
+ fec_stats->uncorrectable_blocks.total = p->brfec_uncorr_blks;
+ } else {
+ fec_stats->corrected_blocks.total = p->rsfec_corr_cws;
+ fec_stats->uncorrectable_blocks.total = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+}
+
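Once registered, these counters feed the standard ethtool FEC statistics interface; with a reasonably recent ethtool they should be visible via the --include-statistics switch, e.g. ethtool -I --show-fec <iface> (treat the exact flag as an assumption of this note rather than something defined by this patch), which reports corrected and uncorrectable block counts.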
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -1298,6 +1331,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_ts_info = otx2_get_ts_info,
+ .get_fec_stats = otx2_get_fec_stats,
.get_fecparam = otx2_get_fecparam,
.set_fecparam = otx2_set_fecparam,
.get_link_ksettings = otx2_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 709fc0114fbd..684cb8ec9f21 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -164,6 +164,8 @@ EXPORT_SYMBOL(otx2_alloc_mcam_entries);
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_get_field_status_req *freq;
+ struct npc_get_field_status_rsp *frsp;
struct npc_mcam_alloc_entry_req *req;
struct npc_mcam_alloc_entry_rsp *rsp;
int vf_vlan_max_flows;
@@ -214,8 +216,29 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
OTX2_MAX_UNICAST_FLOWS;
pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
- pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+
+ /* Check if NPC_DMAC field is supported
+ * by the mkex profile before setting VLAN support flag.
+ */
+ freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
+ if (!freq) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ freq->field = NPC_DMAC;
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &freq->hdr);
+
+ if (frsp->enable) {
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ }
pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
mutex_unlock(&pfvf->mbox.lock);
@@ -688,6 +711,11 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip6dst));
req->features |= BIT_ULL(NPC_DIP_IPV6);
}
+ if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
+ pkt->next_header = ipv6_usr_hdr->l4_proto;
+ pmask->next_header = ipv6_usr_mask->l4_proto;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
+ }
pkt->etype = cpu_to_be16(ETH_P_IPV6);
pmask->etype = cpu_to_be16(0xFFFF);
req->features |= BIT_ULL(NPC_ETYPE);
@@ -868,10 +896,22 @@ static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
req->features |= BIT_ULL(NPC_OUTER_VID);
}
- /* Not Drop/Direct to queue but use action in default entry */
- if (fsp->m_ext.data[1] &&
- fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
- req->op = NIX_RX_ACTION_DEFAULT;
+ if (fsp->m_ext.data[1]) {
+ if (flow_type == IP_USER_FLOW) {
+ if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
+ return -EINVAL;
+
+ pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
+ pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ } else if (fsp->h_ext.data[1] ==
+ cpu_to_be32(OTX2_DEFAULT_ACTION)) {
+ /* Not Drop/Direct to queue but use action
+ * in default entry
+ */
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+ }
}
if (fsp->flow_type & FLOW_MAC_EXT &&
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 303930499a4c..c1ea60bc2630 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1973,7 +1973,7 @@ static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
#endif
#ifdef CONFIG_DCB
- if (!skb->vlan_present)
+ if (!skb_vlan_tag_present(skb))
goto pick_tx;
vlan_prio = skb->vlan_tci >> 13;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 6a01ab1a6e6f..044cc211424e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -532,6 +532,31 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (ntohs(flow_spec->etype) == ETH_P_IP) {
+ flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_mask->ip_flag = 0xff;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
+ flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_mask->next_header = 0xff;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
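With this, fragment matching can be offloaded from tc flower: only the FLOW_DIS_IS_FRAGMENT key is accepted (first/later-fragment matching is rejected above), and for IPv4 it is translated into a match on the more-fragments bit. Assuming the standard flower syntax, a rule along these lines should be steered into the MCAM:

tc filter add dev eth0 ingress protocol ip flower ip_flags frag skip_sw action drop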
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 06279cd6da67..2a4c9df4eb79 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -355,11 +355,6 @@ static int prestera_dl_info_get(struct devlink *dl,
{
struct prestera_switch *sw = devlink_priv(dl);
char buf[16];
- int err;
-
- err = devlink_info_driver_name_put(req, PRESTERA_DRV_NAME);
- if (err)
- return err;
snprintf(buf, sizeof(buf), "%d.%d.%d",
sw->dev->fw_rev.maj,
@@ -445,23 +440,6 @@ void prestera_devlink_port_unregister(struct prestera_port *port)
devlink_port_unregister(&port->dl_port);
}
-void prestera_devlink_port_set(struct prestera_port *port)
-{
- devlink_port_type_eth_set(&port->dl_port, port->dev);
-}
-
-void prestera_devlink_port_clear(struct prestera_port *port)
-{
- devlink_port_type_clear(&port->dl_port);
-}
-
-struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
-{
- struct prestera_port *port = netdev_priv(dev);
-
- return &port->dl_port;
-}
-
int prestera_devlink_traps_register(struct prestera_switch *sw)
{
const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index b322295bad3a..bf84ad6fd87e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -15,11 +15,6 @@ void prestera_devlink_unregister(struct prestera_switch *sw);
int prestera_devlink_port_register(struct prestera_port *port);
void prestera_devlink_port_unregister(struct prestera_port *port);
-void prestera_devlink_port_set(struct prestera_port *port);
-void prestera_devlink_port_clear(struct prestera_port *port);
-
-struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
-
void prestera_devlink_trap_report(struct prestera_port *port,
struct sk_buff *skb, u8 cpu_code);
int prestera_devlink_traps_register(struct prestera_switch *sw);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 47796e4d900c..9d504142e51a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -360,7 +360,6 @@ static void prestera_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_mac_ops prestera_mac_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = prestera_mac_select_pcs,
.mac_config = prestera_mac_config,
.mac_link_down = prestera_mac_link_down,
@@ -569,7 +568,6 @@ static const struct net_device_ops prestera_netdev_ops = {
.ndo_change_mtu = prestera_port_change_mtu,
.ndo_get_stats64 = prestera_port_get_stats64,
.ndo_set_mac_address = prestera_port_set_mac_address,
- .ndo_get_devlink_port = prestera_devlink_get_port,
};
int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes)
@@ -644,6 +642,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
+ SET_NETDEV_DEVLINK_PORT(dev, &port->dl_port);
if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP)
netif_carrier_off(dev);
@@ -737,8 +736,6 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
if (err)
goto err_register_netdev;
- prestera_devlink_port_set(port);
-
err = prestera_port_sfp_bind(port);
if (err)
goto err_sfp_bind;
@@ -762,7 +759,6 @@ static void prestera_port_destroy(struct prestera_port *port)
struct net_device *dev = port->dev;
cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
- prestera_devlink_port_clear(port);
unregister_netdev(dev);
prestera_port_list_del(port);
prestera_devlink_port_unregister(port);
@@ -863,17 +859,10 @@ static void prestera_event_handlers_unregister(struct prestera_switch *sw)
static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
- struct device_node *base_mac_np;
- int ret = 0;
-
- if (sw->np) {
- base_mac_np = of_parse_phandle(sw->np, "base-mac-provider", 0);
- if (base_mac_np) {
- ret = of_get_mac_address(base_mac_np, sw->base_mac);
- of_node_put(base_mac_np);
- }
- }
+ int ret;
+ if (sw->np)
+ ret = of_get_mac_address(sw->np, sw->base_mac);
if (!is_valid_ether_addr(sw->base_mac) || ret) {
eth_random_addr(sw->base_mac);
dev_info(prestera_dev(sw), "using random base mac address\n");
@@ -1377,7 +1366,7 @@ static int prestera_switch_init(struct prestera_switch *sw)
{
int err;
- sw->np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+ sw->np = sw->dev->dev->of_node;
err = prestera_hw_switch_init(sw);
if (err) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 59470d99f522..f328d957b2db 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -15,12 +15,13 @@
#define PRESTERA_MSG_MAX_SIZE 1500
#define PRESTERA_SUPP_FW_MAJ_VER 4
-#define PRESTERA_SUPP_FW_MIN_VER 0
+#define PRESTERA_SUPP_FW_MIN_VER 1
#define PRESTERA_PREV_FW_MAJ_VER 4
#define PRESTERA_PREV_FW_MIN_VER 0
#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
+#define PRESTERA_FW_ARM64_PATH_FMT "mrvl/prestera/mvsw_prestera_fw_arm64-v%u.%u.img"
#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
#define PRESTERA_FW_DL_TIMEOUT_MS 50000
@@ -184,6 +185,15 @@ struct prestera_fw_regs {
#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000
#define PRESTERA_FW_READY_WAIT_MS 20000
+#define PRESTERA_DEV_ID_AC3X_98DX_55 0xC804
+#define PRESTERA_DEV_ID_AC3X_98DX_65 0xC80C
+#define PRESTERA_DEV_ID_ALDRIN2 0xCC1E
+#define PRESTERA_DEV_ID_98DX7312M 0x981F
+#define PRESTERA_DEV_ID_98DX3500 0x9820
+#define PRESTERA_DEV_ID_98DX3501 0x9826
+#define PRESTERA_DEV_ID_98DX3510 0x9821
+#define PRESTERA_DEV_ID_98DX3520 0x9822
+
struct prestera_fw_evtq {
u8 __iomem *addr;
size_t len;
@@ -201,6 +211,7 @@ struct prestera_fw {
const struct firmware *bin;
struct workqueue_struct *wq;
struct prestera_device dev;
+ struct pci_dev *pci_dev;
u8 __iomem *ldr_regs;
u8 __iomem *ldr_ring_buf;
u32 ldr_buf_len;
@@ -689,6 +700,20 @@ static int prestera_fw_hdr_parse(struct prestera_fw *fw)
return prestera_fw_rev_check(fw);
}
+static const char *prestera_fw_path_fmt_get(struct prestera_fw *fw)
+{
+ switch (fw->pci_dev->device) {
+ case PRESTERA_DEV_ID_98DX3500:
+ case PRESTERA_DEV_ID_98DX3501:
+ case PRESTERA_DEV_ID_98DX3510:
+ case PRESTERA_DEV_ID_98DX3520:
+ return PRESTERA_FW_ARM64_PATH_FMT;
+
+ default:
+ return PRESTERA_FW_PATH_FMT;
+ }
+}
+
static int prestera_fw_get(struct prestera_fw *fw)
{
int ver_maj = PRESTERA_SUPP_FW_MAJ_VER;
@@ -697,7 +722,7 @@ static int prestera_fw_get(struct prestera_fw *fw)
int err;
pick_fw_ver:
- snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ snprintf(fw_path, sizeof(fw_path), prestera_fw_path_fmt_get(fw),
ver_maj, ver_min);
err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
@@ -774,22 +799,56 @@ out_release:
return err;
}
+static bool prestera_pci_pp_use_bar2(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PRESTERA_DEV_ID_98DX7312M:
+ case PRESTERA_DEV_ID_98DX3500:
+ case PRESTERA_DEV_ID_98DX3501:
+ case PRESTERA_DEV_ID_98DX3510:
+ case PRESTERA_DEV_ID_98DX3520:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static u32 prestera_pci_pp_bar2_offs(struct pci_dev *pdev)
+{
+ if (pci_resource_len(pdev, 2) == 0x1000000)
+ return 0x0;
+ else
+ return (pci_resource_len(pdev, 2) / 2);
+}
+
+static u32 prestera_pci_fw_bar2_offs(struct pci_dev *pdev)
+{
+ if (pci_resource_len(pdev, 2) == 0x1000000)
+ return 0x400000;
+ else
+ return 0x0;
+}
+
static int prestera_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const char *driver_name = dev_driver_string(&pdev->dev);
+ u8 __iomem *mem_addr, *pp_addr = NULL;
struct prestera_fw *fw;
int err;
err = pcim_enable_device(pdev);
- if (err)
- return err;
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
- err = pcim_iomap_regions(pdev, BIT(PRESTERA_PCI_BAR_FW) |
- BIT(PRESTERA_PCI_BAR_PP),
- pci_name(pdev));
- if (err)
- return err;
+ err = pci_request_regions(pdev, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
if (err) {
@@ -797,6 +856,26 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_dma_mask;
}
+ mem_addr = pcim_iomap(pdev, 2, 0);
+ if (!mem_addr) {
+ dev_err(&pdev->dev, "pci mem ioremap failed\n");
+ err = -EIO;
+ goto err_mem_ioremap;
+ }
+
+ /* AC5X devices use second half of BAR2 */
+ if (prestera_pci_pp_use_bar2(pdev)) {
+ pp_addr = mem_addr + prestera_pci_pp_bar2_offs(pdev);
+ mem_addr = mem_addr + prestera_pci_fw_bar2_offs(pdev);
+ } else {
+ pp_addr = pcim_iomap(pdev, 4, 0);
+ if (!pp_addr) {
+ dev_err(&pdev->dev, "pp regs ioremap failed\n");
+ err = -EIO;
+ goto err_pp_ioremap;
+ }
+ }
+
pci_set_master(pdev);
fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL);
@@ -805,8 +884,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_pci_dev_alloc;
}
- fw->dev.ctl_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_FW];
- fw->dev.pp_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_PP];
+ fw->pci_dev = pdev;
+ fw->dev.ctl_regs = mem_addr;
+ fw->dev.pp_regs = pp_addr;
fw->dev.dev = &pdev->dev;
pci_set_drvdata(pdev, fw);
@@ -854,7 +934,12 @@ err_wq_alloc:
prestera_fw_uninit(fw);
err_prestera_fw_init:
err_pci_dev_alloc:
+err_pp_ioremap:
+err_mem_ioremap:
err_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+err_pci_enable_device:
return err;
}
@@ -867,12 +952,18 @@ static void prestera_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
destroy_workqueue(fw->wq);
prestera_fw_uninit(fw);
+ pci_release_regions(pdev);
}
static const struct pci_device_id prestera_pci_devices[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_55) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_65) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_ALDRIN2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX7312M) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3500) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3501) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3510) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3520) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
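For the BAR layout introduced above: on the AC5X-class devices both register windows live in BAR2, with the firmware/control window at +0x400000 when the BAR is 16 MiB and the packet-processor window taking the second half otherwise. A stand-alone sketch of that offset arithmetic (constants copied from the hunk, everything else illustrative):

#include <stdio.h>

static unsigned long pp_offs(unsigned long bar2_len)
{
	return bar2_len == 0x1000000 ? 0x0 : bar2_len / 2;
}

static unsigned long fw_offs(unsigned long bar2_len)
{
	return bar2_len == 0x1000000 ? 0x400000 : 0x0;
}

int main(void)
{
	unsigned long len = 0x1000000;	/* 16 MiB BAR2 */

	printf("pp regs at BAR2+0x%lx, fw/ctl regs at BAR2+0x%lx\n",
	       pp_offs(len), fw_offs(len));
	return 0;
}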
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ab33ba1c3023..ff97b140886a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3894,19 +3894,19 @@ static void sky2_get_stats(struct net_device *dev,
u64 _bytes, _packets;
do {
- start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
+ start = u64_stats_fetch_begin(&sky2->rx_stats.syncp);
_bytes = sky2->rx_stats.bytes;
_packets = sky2->rx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start));
stats->rx_packets = _packets;
stats->rx_bytes = _bytes;
do {
- start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
+ start = u64_stats_fetch_begin(&sky2->tx_stats.syncp);
_bytes = sky2->tx_stats.bytes;
_packets = sky2->tx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start));
stats->tx_packets = _packets;
stats->tx_bytes = _bytes;
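This hunk (and the mtk_eth_soc ones further down) moves from the _irq variants to plain u64_stats_fetch_begin()/u64_stats_fetch_retry(); the read side remains the usual seqcount retry loop. A rough user-space analogue of that reader pattern, purely illustrative (the in-kernel helpers additionally handle 32-bit SMP and the writer side):

#include <stdatomic.h>
#include <stdint.h>

struct dev_stats {
	atomic_uint seq;	/* even: stable, odd: writer in progress */
	uint64_t bytes;
	uint64_t packets;
};

static void stats_read(struct dev_stats *s, uint64_t *bytes, uint64_t *packets)
{
	unsigned int start;

	do {
		do {
			start = atomic_load_explicit(&s->seq, memory_order_acquire);
		} while (start & 1);		/* wait out an in-progress writer */

		*bytes = s->bytes;
		*packets = s->packets;

		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&s->seq, memory_order_relaxed) != start);
}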
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 45ba0970504a..8e0c61c33ff8 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
ifdef CONFIG_DEBUG_FS
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 1d36619c5ec9..e3de9a53b2d9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -23,6 +23,7 @@
#include <linux/jhash.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
+#include <net/dst_metadata.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -54,6 +55,7 @@ static const struct mtk_reg_map mtk_reg_map = {
},
.qdma = {
.qtx_cfg = 0x1800,
+ .qtx_sch = 0x1804,
.rx_ptr = 0x1900,
.rx_cnt_cfg = 0x1904,
.qcrx_ptr = 0x1908,
@@ -61,6 +63,7 @@ static const struct mtk_reg_map mtk_reg_map = {
.rst_idx = 0x1a08,
.delay_irq = 0x1a0c,
.fc_th = 0x1a10,
+ .tx_sch_rate = 0x1a14,
.int_grp = 0x1a20,
.hred = 0x1a44,
.ctx_ptr = 0x1b00,
@@ -113,6 +116,7 @@ static const struct mtk_reg_map mt7986_reg_map = {
},
.qdma = {
.qtx_cfg = 0x4400,
+ .qtx_sch = 0x4404,
.rx_ptr = 0x4500,
.rx_cnt_cfg = 0x4504,
.qcrx_ptr = 0x4508,
@@ -130,6 +134,7 @@ static const struct mtk_reg_map mt7986_reg_map = {
.fq_tail = 0x4724,
.fq_count = 0x4728,
.fq_blen = 0x472c,
+ .tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
.gdma_to_ppe = 0x3333,
@@ -613,6 +618,75 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
+ int speed)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 ofs, val;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ return;
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+
+ if (IS_ENABLED(CONFIG_SOC_MT7621)) {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ofs = MTK_QTX_OFFSET * idx;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+}
+
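The MIN/MAX rate programming above is plain GENMASK()/FIELD_PREP() bitfield packing into the per-queue scheduler register. A user-space re-implementation of that packing, using the 1 Gbit/s values from the hunk (man 10, exp 5, weight 10); the field layout comes from the mtk_eth_soc.h hunk later in this diff:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define MAX_RATE_WEIGHT	GENMASK(15, 12)
#define MAX_RATE_EN	(1u << 11)
#define MAX_RATE_MAN	GENMASK(10, 4)
#define MAX_RATE_EXP	GENMASK(3, 0)

int main(void)
{
	uint32_t val = MAX_RATE_EN |
		       FIELD_PREP(MAX_RATE_MAN, 10) |
		       FIELD_PREP(MAX_RATE_EXP, 5) |
		       FIELD_PREP(MAX_RATE_WEIGHT, 10);

	printf("QTX_SCH max-rate bits: 0x%08x\n", val);	/* 0x0000a8a5 */
	return 0;
}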
static void mtk_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
@@ -638,6 +712,8 @@ static void mtk_mac_link_up(struct phylink_config *config,
break;
}
+ mtk_set_queue_speed(mac->hw, mac->id, speed);
+
/* Configure duplex */
if (duplex == DUPLEX_FULL)
mcr |= MAC_MCR_FORCE_DPX;
@@ -653,7 +729,6 @@ static void mtk_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops mtk_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = mtk_mac_select_pcs,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_config = mtk_mac_config,
@@ -865,7 +940,7 @@ static void mtk_get_stats64(struct net_device *dev,
}
do {
- start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
+ start = u64_stats_fetch_begin(&hw_stats->syncp);
storage->rx_packets = hw_stats->rx_packets;
storage->tx_packets = hw_stats->tx_packets;
storage->rx_bytes = hw_stats->rx_bytes;
@@ -877,7 +952,7 @@ static void mtk_get_stats64(struct net_device *dev,
storage->rx_crc_errors = hw_stats->rx_fcs_errors;
storage->rx_errors = hw_stats->rx_checksum_errors;
storage->tx_aborted_errors = hw_stats->tx_skip;
- } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
storage->tx_errors = dev->stats.tx_errors;
storage->rx_dropped = dev->stats.rx_dropped;
@@ -938,7 +1013,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_DMA_SIZE;
+ int cnt = MTK_QDMA_RING_SIZE;
dma_addr_t dma_addr;
int i;
@@ -1099,7 +1174,8 @@ static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
WRITE_ONCE(desc->txd1, info->addr);
- data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
+ FIELD_PREP(TX_DMA_PQID, info->qid);
if (info->last)
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
@@ -1133,9 +1209,6 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
- if (!info->qid && mac->id)
- info->qid = MTK_QDMA_GMAC2_QID;
-
data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
@@ -1179,11 +1252,12 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
.gso = gso,
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
.vlan = skb_vlan_tag_present(skb),
- .qid = skb->mark & MTK_QDMA_TX_MASK,
+ .qid = skb_get_queue_mapping(skb),
.vlan_tci = skb_vlan_tag_get(skb),
.first = true,
.last = !skb_is_nonlinear(skb),
};
+ struct netdev_queue *txq;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
const struct mtk_soc_data *soc = eth->soc;
@@ -1191,8 +1265,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
int i, n_desc = 1;
+ int queue = skb_get_queue_mapping(skb);
int k = 0;
+ txq = netdev_get_tx_queue(dev, queue);
itxd = ring->next_free;
itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
@@ -1241,7 +1317,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
soc->txrx.dma_max_len);
- txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
+ txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
@@ -1280,7 +1356,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
txd_pdma->txd2 |= TX_DMA_LS1;
}
- netdev_sent_queue(dev, skb->len);
+ netdev_tx_sent_queue(txq, skb->len);
skb_tx_timestamp(skb);
ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
@@ -1292,8 +1368,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
wmb();
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
int next_idx;
@@ -1362,7 +1437,7 @@ static void mtk_wake_queue(struct mtk_eth *eth)
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
- netif_wake_queue(eth->netdev[i]);
+ netif_tx_wake_all_queues(eth->netdev[i]);
}
}
@@ -1386,7 +1461,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
spin_unlock(&eth->page_lock);
@@ -1412,7 +1487,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
spin_unlock(&eth->page_lock);
@@ -1579,10 +1654,12 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
struct mtk_tx_dma_desc_info txd_info = {
.size = xdpf->len,
.first = true,
.last = !xdp_frame_has_frags(xdpf),
+ .qid = mac->id,
};
int err, index = 0, n_desc = 1, nr_frags;
struct mtk_tx_buf *htx_buf, *tx_buf;
@@ -1632,6 +1709,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = skb_frag_size(&sinfo->frags[index]);
txd_info.last = index + 1 == nr_frags;
+ txd_info.qid = mac->id;
data = skb_frag_address(&sinfo->frags[index]);
index++;
@@ -1937,16 +2015,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
htons(RX_DMA_VPID(trxd.rxd4)),
RX_DMA_VID(trxd.rxd4));
} else if (trxd.rxd2 & RX_DMA_VTAG) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
RX_DMA_VID(trxd.rxd3));
}
+ }
+
+ /* When using VLAN untagging in combination with DSA, the
+ * hardware treats the MTK special tag as a VLAN and untags it.
+ */
+ if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
+ unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
+
+ if (port < ARRAY_SIZE(eth->dsa_meta) &&
+ eth->dsa_meta[port])
+ skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
- /* If the device is attached to a dsa switch, the special
- * tag inserted in VLAN field by hw switch can be offloaded
- * by RX HW VLAN offload. Clear vlan info.
- */
- if (netdev_uses_dsa(netdev))
- __vlan_hwaccel_clear_tag(skb);
+ __vlan_hwaccel_clear_tag(skb);
}
skb_record_rx_queue(skb, 0);
@@ -1986,8 +2070,46 @@ rx_done:
return done;
}
+struct mtk_poll_state {
+ struct netdev_queue *txq;
+ unsigned int total;
+ unsigned int done;
+ unsigned int bytes;
+};
+
+static void
+mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
+ struct sk_buff *skb)
+{
+ struct netdev_queue *txq;
+ struct net_device *dev;
+ unsigned int bytes = skb->len;
+
+ state->total++;
+ eth->tx_packets++;
+ eth->tx_bytes += bytes;
+
+ dev = eth->netdev[mac];
+ if (!dev)
+ return;
+
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ if (state->txq == txq) {
+ state->done++;
+ state->bytes += bytes;
+ return;
+ }
+
+ if (state->txq)
+ netdev_tx_completed_queue(state->txq, state->done, state->bytes);
+
+ state->txq = txq;
+ state->done = 1;
+ state->bytes = bytes;
+}
+
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
- unsigned int *done, unsigned int *bytes)
+ struct mtk_poll_state *state)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
@@ -2019,12 +2141,9 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- if (tx_buf->type == MTK_TYPE_SKB) {
- struct sk_buff *skb = tx_buf->data;
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, mac, tx_buf->data);
- bytes[mac] += skb->len;
- done[mac]++;
- }
budget--;
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
@@ -2043,7 +2162,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
}
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
- unsigned int *done, unsigned int *bytes)
+ struct mtk_poll_state *state)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_buf *tx_buf;
@@ -2061,12 +2180,8 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- if (tx_buf->type == MTK_TYPE_SKB) {
- struct sk_buff *skb = tx_buf->data;
-
- bytes[0] += skb->len;
- done[0]++;
- }
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, 0, tx_buf->data);
budget--;
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
@@ -2088,26 +2203,15 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct dim_sample dim_sample = {};
- unsigned int done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
- int total = 0, i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
+ struct mtk_poll_state state = {};
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+ budget = mtk_poll_tx_qdma(eth, budget, &state);
else
- budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+ budget = mtk_poll_tx_pdma(eth, budget, &state);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] || !done[i])
- continue;
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
- total += done[i];
- eth->tx_packets += done[i];
- eth->tx_bytes += bytes[i];
- }
+ if (state.txq)
+ netdev_tx_completed_queue(state.txq, state.done, state.bytes);
dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
&dim_sample);
@@ -2117,7 +2221,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
- return total;
+ return state.total;
}
static void mtk_handle_status_irq(struct mtk_eth *eth)
@@ -2202,19 +2306,26 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
struct mtk_tx_ring *ring = &eth->tx_ring;
int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd;
+ int ring_size;
+ u32 ofs, val;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ ring_size = MTK_QDMA_RING_SIZE;
+ else
+ ring_size = MTK_DMA_SIZE;
- ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
+ ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL);
if (!ring->dma)
goto no_tx_mem;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
- int next = (i + 1) % MTK_DMA_SIZE;
+ for (i = 0; i < ring_size; i++) {
+ int next = (i + 1) % ring_size;
u32 next_ptr = ring->phys + next * sz;
txd = ring->dma + i * sz;
@@ -2234,22 +2345,22 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring_size; i++) {
ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
ring->dma_pdma[i].txd4 = 0;
}
}
- ring->dma_size = MTK_DMA_SIZE;
- atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+ ring->dma_size = ring_size;
+ atomic_set(&ring->free_count, ring_size - 2);
ring->next_free = ring->dma;
ring->last_free = (void *)txd;
- ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
+ ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
@@ -2261,14 +2372,31 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ ring->phys + ((ring_size - 1) * sz),
soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
- soc->reg_map->qdma.qtx_cfg);
+
+ for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
+ val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+ ofs += MTK_QTX_OFFSET;
+ }
+ val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+ mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
@@ -2286,7 +2414,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
int i;
if (ring->buf) {
- for (i = 0; i < MTK_DMA_SIZE; i++)
+ for (i = 0; i < ring->dma_size; i++)
mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
@@ -2294,14 +2422,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma_size * soc->txrx.txd_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma_size * soc->txrx.txd_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2727,15 +2855,30 @@ static netdev_features_t mtk_fix_features(struct net_device *dev,
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
- int err = 0;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ netdev_features_t diff = dev->features ^ features;
+ int i;
- if (!((dev->features ^ features) & NETIF_F_LRO))
+ if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);
+
+ /* Set RX VLAN offloading */
+ if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
- if (!(features & NETIF_F_LRO))
- mtk_hwlro_netdev_disable(dev);
+ mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
+ MTK_CDMP_EG_CTRL);
- return err;
+ /* sync features with other MAC */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i] || eth->netdev[i] == dev)
+ continue;
+ eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ return 0;
}
/* wait for DMA to finish whatever it is doing before we start using it again */
@@ -2823,7 +2966,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
@@ -2932,7 +3075,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
- MTK_CHK_DDONE_EN;
+ MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
@@ -2978,11 +3121,85 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
mtk_w32(eth, 0, MTK_RST_GL);
}
+
+static bool mtk_uses_dsa(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ return netdev_uses_dsa(dev) &&
+ dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
+#else
+ return false;
+#endif
+}
+
+static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
+{
+ struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
+ struct mtk_eth *eth = mac->hw;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_link_ksettings s;
+ struct net_device *ldev;
+ struct list_head *iter;
+ struct dsa_port *dp;
+
+ if (event != NETDEV_CHANGE)
+ return NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, ldev, iter) {
+ if (netdev_priv(ldev) == mac)
+ goto found;
+ }
+
+ return NOTIFY_DONE;
+
+found:
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
+ if (__ethtool_get_link_ksettings(dev, &s))
+ return NOTIFY_DONE;
+
+ if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
+ return NOTIFY_DONE;
+
+ dp = dsa_port_from_netdev(dev);
+ if (dp->index >= MTK_QDMA_NUM_QUEUES)
+ return NOTIFY_DONE;
+
+ mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
+
+ return NOTIFY_DONE;
+}
+
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- int err;
+ int i, err;
+
+ if (mtk_uses_dsa(dev) && !eth->prog) {
+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ struct metadata_dst *md_dst = eth->dsa_meta[i];
+
+ if (md_dst)
+ continue;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ eth->dsa_meta[i] = md_dst;
+ }
+ } else {
+ /* Hardware special tag parsing needs to be disabled if at least
+ * one MAC does not use DSA.
+ */
+ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ val &= ~MTK_CDMP_STAG_EN;
+ mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
+ }
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
@@ -3020,7 +3237,8 @@ static int mtk_open(struct net_device *dev)
refcount_inc(&eth->dma_refcnt);
phylink_start(mac->phylink);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
}
@@ -3229,6 +3447,30 @@ static void mtk_dim_tx(struct work_struct *work)
dim->state = DIM_START_MEASURE;
}
+static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
+{
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return;
+
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+
+ if (val <= 1518)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+ else if (val <= 1536)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+ else if (val <= 1552)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+ else
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+}
+
static int mtk_hw_init(struct mtk_eth *eth)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
@@ -3268,16 +3510,17 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
}
- val = RSTCTRL_FE | RSTCTRL_PPE;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
-
- val |= RSTCTRL_ETH;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- val |= RSTCTRL_PPE1;
+ val = RSTCTRL_PPE0_V2;
+ } else {
+ val = RSTCTRL_PPE0;
}
- ethsys_reset(eth, val);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
@@ -3303,14 +3546,26 @@ static int mtk_hw_init(struct mtk_eth *eth)
* up with the more appropriate value when mtk_mac_config call is being
* invoked.
*/
- for (i = 0; i < MTK_MAC_COUNT; i++)
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ struct net_device *dev = eth->netdev[i];
+
mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+ if (dev) {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
+ }
+ }
/* Indicates CDM to parse the MTK special tag from CPU
* which also is working out for untag packets.
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+ }
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
@@ -3331,9 +3586,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- /* PSE should not drop port8 and port9 packets */
+ /* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+ /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
+ mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
@@ -3420,7 +3678,6 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
int length = new_mtu + MTK_RX_ETH_HLEN;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new;
if (rcu_access_pointer(eth->prog) &&
length > MTK_PP_MAX_BUF_SIZE) {
@@ -3428,23 +3685,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
-
- if (length <= 1518)
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
- else if (length <= 1536)
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
- else if (length <= 1552)
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
- else
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
-
- if (mcr_new != mcr_cur)
- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
- }
-
+ mtk_set_mcr_max_rx(mac, length);
dev->mtu = new_mtu;
return 0;
@@ -3475,11 +3716,8 @@ static void mtk_pending_work(struct work_struct *work)
rtnl_lock();
dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+ set_bit(MTK_RESETTING, &eth->state);
- while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
- cpu_relax();
-
- dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
@@ -3513,7 +3751,7 @@ static void mtk_pending_work(struct work_struct *work)
dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
- clear_bit_unlock(MTK_RESETTING, &eth->state);
+ clear_bit(MTK_RESETTING, &eth->state);
rtnl_unlock();
}
@@ -3528,6 +3766,12 @@ static int mtk_free_dev(struct mtk_eth *eth)
free_netdev(eth->netdev[i]);
}
+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ if (!eth->dsa_meta[i])
+ break;
+ metadata_dst_free(eth->dsa_meta[i]);
+ }
+
return 0;
}
@@ -3536,8 +3780,12 @@ static int mtk_unreg_dev(struct mtk_eth *eth)
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
+ struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
+ mac = netdev_priv(eth->netdev[i]);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ unregister_netdevice_notifier(&mac->device_notifier);
unregister_netdev(eth->netdev[i]);
}
@@ -3688,13 +3936,13 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
do {
data_dst = data;
- start = u64_stats_fetch_begin_irq(&hwstats->syncp);
+ start = u64_stats_fetch_begin(&hwstats->syncp);
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
if (mtk_page_pool_enabled(mac->hw))
mtk_ethtool_pp_stats(mac->hw, data_dst);
- } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+ } while (u64_stats_fetch_retry(&hwstats->syncp, start));
}
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
@@ -3753,6 +4001,23 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ unsigned int queue = 0;
+
+ if (netdev_uses_dsa(dev))
+ queue = skb_get_queue_mapping(skb) + 3;
+ else
+ queue = mac->id;
+
+ if (queue >= dev->num_tx_queues)
+ queue = 0;
+
+ return queue;
+}
+
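For context, this mapping lines up with the rest of the series: traffic from DSA user ports is placed on queues 3 and up (matching the dp->index + 3 used in mtk_device_event() and the 3 + dsa_port used in the PPE offload hunk below), while non-DSA traffic simply uses the MAC id, with an out-of-range result falling back to queue 0.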
static const struct ethtool_ops mtk_ethtool_ops = {
.get_link_ksettings = mtk_get_link_ksettings,
.set_link_ksettings = mtk_set_link_ksettings,
@@ -3788,6 +4053,7 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_setup_tc = mtk_eth_setup_tc,
.ndo_bpf = mtk_xdp,
.ndo_xdp_xmit = mtk_xdp_xmit,
+ .ndo_select_queue = mtk_select_queue,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -3797,6 +4063,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
struct phylink *phylink;
struct mtk_mac *mac;
int id, err;
+ int txqs = 1;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -3814,7 +4081,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
return -EINVAL;
}
- eth->netdev[id] = alloc_etherdev(sizeof(*mac));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
if (!eth->netdev[id]) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -3911,6 +4181,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
else
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mac->device_notifier.notifier_call = mtk_device_event;
+ register_netdevice_notifier(&mac->device_notifier);
+ }
+
return 0;
free_netdev:
@@ -4242,7 +4517,7 @@ static const struct mtk_soc_data mt7621_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
- .offload_version = 2,
+ .offload_version = 1,
.hash_offset = 2,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
@@ -4281,7 +4556,7 @@ static const struct mtk_soc_data mt7623_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
- .offload_version = 2,
+ .offload_version = 1,
.hash_offset = 2,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
@@ -4318,6 +4593,7 @@ static const struct mtk_soc_data mt7986_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
+ .offload_version = 2,
.hash_offset = 4,
.foe_entry_size = sizeof(struct mtk_foe_entry),
.txrx = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index b52f3b0177ef..18a50529ce7b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -22,11 +22,16 @@
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
+#define MTK_MAX_DSA_PORTS 7
+#define MTK_DSA_PORT_MASK GENMASK(2, 0)
+
+#define MTK_QDMA_NUM_QUEUES 16
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
+#define MTK_QDMA_RING_SIZE 2048
#define MTK_DMA_SIZE 512
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
@@ -91,6 +96,9 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMQ Egress Control Register */
+#define MTK_CDMQ_EG_CTRL 0x1404
+
/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL 0x400
#define MTK_CDMP_STAG_EN BIT(0)
@@ -120,6 +128,7 @@
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_DROP_CFG 0x108
+#define PSE_PPE0_DROP 0x110
/* PSE Input Queue Reservation Register*/
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
@@ -202,8 +211,26 @@
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
/* QDMA TX Queue Configuration Registers */
+#define MTK_QTX_OFFSET 0x10
#define QDMA_RES_THRES 4
+/* QDMA Tx Queue Scheduler Configuration Registers */
+#define MTK_QTX_SCH_TX_SEL BIT(31)
+#define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30)
+
+#define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30)
+#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28)
+#define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
+#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
+#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
+#define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12)
+#define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
+#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
+#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
+
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
+
/* QDMA Global Configuration Register */
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
@@ -222,6 +249,7 @@
#define MTK_WCOMP_EN BIT(24)
#define MTK_RESV_BUF (0x40 << 16)
#define MTK_MUTLI_CNT (0x4 << 12)
+#define MTK_LEAKY_BUCKET_EN BIT(11)
/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE BIT(20)
@@ -250,8 +278,6 @@
#define MTK_STAT_OFFSET 0x40
/* QDMA TX NUM */
-#define MTK_QDMA_TX_NUM 16
-#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID 8
@@ -281,6 +307,7 @@
#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
+#define TX_DMA_PQID GENMASK(3, 0)
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -447,18 +474,14 @@
/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
#define RSTCTRL_FE BIT(6)
-#define RSTCTRL_PPE BIT(31)
-#define RSTCTRL_PPE1 BIT(30)
+#define RSTCTRL_PPE0 BIT(31)
+#define RSTCTRL_PPE0_V2 BIT(30)
+#define RSTCTRL_PPE1 BIT(31)
#define RSTCTRL_ETH BIT(23)
/* ethernet reset check idle register */
#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
-/* ethernet reset control register */
-#define ETHSYS_RSTCTRL 0x34
-#define RSTCTRL_FE BIT(6)
-#define RSTCTRL_PPE BIT(31)
-
/* ethernet dma channel agent map */
#define ETHSYS_DMA_AG_MAP 0x408
#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
@@ -466,8 +489,10 @@
#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
/* SGMII subsystem config registers */
-/* Register to auto-negotiation restart */
+/* BMCR (low 16) BMSR (high 16) */
#define SGMSYS_PCS_CONTROL_1 0x0
+#define SGMII_BMCR GENMASK(15, 0)
+#define SGMII_BMSR GENMASK(31, 16)
#define SGMII_AN_RESTART BIT(9)
#define SGMII_ISOLATE BIT(10)
#define SGMII_AN_ENABLE BIT(12)
@@ -477,13 +502,18 @@
#define SGMII_PCS_FAULT BIT(23)
#define SGMII_AN_EXPANSION_CLR BIT(30)
+#define SGMSYS_PCS_ADVERTISE 0x8
+#define SGMII_ADVERTISE GENMASK(15, 0)
+#define SGMII_LPA GENMASK(31, 16)
+
/* Register to programmable link timer, the unit in 2 * 8ns */
#define SGMSYS_PCS_LINK_TIMER 0x18
-#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
+#define SGMII_LINK_TIMER_MASK GENMASK(19, 0)
+#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & SGMII_LINK_TIMER_MASK)
/* Register to control remote fault */
#define SGMSYS_SGMII_MODE 0x20
-#define SGMII_IF_MODE_BIT0 BIT(0)
+#define SGMII_IF_MODE_SGMII BIT(0)
#define SGMII_SPEED_DUPLEX_AN BIT(1)
#define SGMII_SPEED_MASK GENMASK(3, 2)
#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
@@ -932,6 +962,7 @@ struct mtk_reg_map {
} pdma;
struct {
u32 qtx_cfg; /* tx queue configuration */
+ u32 qtx_sch; /* tx queue scheduler configuration */
u32 rx_ptr; /* rx base pointer */
u32 rx_cnt_cfg; /* rx max count configuration */
u32 qcrx_ptr; /* rx cpu pointer */
@@ -949,6 +980,7 @@ struct mtk_reg_map {
u32 fq_tail; /* fq tail pointer */
u32 fq_count; /* fq free page count */
u32 fq_blen; /* fq free page buffer length */
+ u32 tx_sch_rate; /* tx scheduler rate control registers */
} qdma;
u32 gdm1_cnt;
u32 gdma_to_ppe;
@@ -1114,6 +1146,8 @@ struct mtk_eth {
int ip_align;
+ struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
+
struct mtk_ppe *ppe[2];
struct rhashtable flow_table;
@@ -1140,6 +1174,7 @@ struct mtk_mac {
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
unsigned int syscfg0;
+ struct notifier_block device_notifier;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 784ecb2dc9fb..269208a841c7 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -175,6 +175,8 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
} else {
+ int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
+
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
@@ -182,7 +184,7 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
entry->ib1 = val;
val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
- FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
}
@@ -397,6 +399,24 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
return 0;
}
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue)
+{
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_QID_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_QID;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS;
+ }
+
+ return 0;
+}
+
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
struct mtk_foe_entry *data)
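Side note (not part of the patch): the new mtk_foe_entry_set_queue() helper above uses the usual clear-then-set pattern for a packed bitfield inside ib2. The stand-alone sketch below illustrates that pattern with made-up mask values; QID_MASK and QOS_FLAG are placeholders for illustration only and are not the real MTK_FOE_IB2_* definitions.

#include <stdint.h>
#include <stdio.h>

#define QID_MASK	0x0000007fu	/* stand-in for MTK_FOE_IB2_QID_V2 */
#define QOS_FLAG	0x00000100u	/* stand-in for MTK_FOE_IB2_PSE_QOS_V2 */

static uint32_t set_queue(uint32_t ib2, unsigned int queue)
{
	ib2 &= ~QID_MASK;		/* clear the previous queue id */
	ib2 |= queue & QID_MASK;	/* insert the new queue id */
	ib2 |= QOS_FLAG;		/* mark the entry as QoS-mapped */
	return ib2;
}

int main(void)
{
	printf("0x%08x\n", set_queue(0xffffffffu, 5));	/* prints 0xffffff85 */
	return 0;
}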
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index a09c32539bcc..ea64fac1d425 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -68,7 +68,9 @@ enum {
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_QID_V2 GENMASK(6, 0)
#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_PSE_QOS_V2 BIT(8)
#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
@@ -351,6 +353,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int sid);
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int wdma_idx, int txq, int bss, int wcid);
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 28bbd1df3e30..81afd5ee3fbf 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -188,7 +188,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
int *wed_index)
{
struct mtk_wdma_info info = {};
- int pse_port, dsa_port;
+ int pse_port, dsa_port, queue;
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
@@ -212,8 +212,6 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
}
dsa_port = mtk_flow_get_dsa_port(&dev);
- if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(eth, foe, dsa_port);
if (dev == eth->netdev[0])
pse_port = 1;
@@ -222,6 +220,14 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
else
return -EOPNOTSUPP;
+ if (dsa_port >= 0) {
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
+ queue = 3 + dsa_port;
+ } else {
+ queue = pse_port - 1;
+ }
+ mtk_foe_entry_set_queue(eth, foe, queue);
+
out:
mtk_foe_entry_set_pse_port(eth, foe, pse_port);
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 736839c84130..5c286f2c9418 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -19,110 +19,136 @@ static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct mtk_pcs, pcs);
}
-/* For SGMII interface mode */
-static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
+static void mtk_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- unsigned int val;
-
- /* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER,
- SGMII_LINK_TIMER_DEFAULT);
-
- regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
- val |= SGMII_REMOTE_FAULT_DIS;
- regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
-
- regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
-
- regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int bm, adv;
- return 0;
+ /* Read the BMSR and LPA */
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
+ phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
+ FIELD_GET(SGMII_LPA, adv));
}
-/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a
- * fixed speed.
- */
-static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
- phy_interface_t interface)
+static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- unsigned int val;
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int rgc3, sgm_mode, bmcr;
+ int advertise, link_timer;
+ bool changed, use_an;
- regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val);
- val &= ~RG_PHY_SPEED_MASK;
if (interface == PHY_INTERFACE_MODE_2500BASEX)
- val |= RG_PHY_SPEED_3_125G;
- regmap_write(mpcs->regmap, mpcs->ana_rgc3, val);
+ rgc3 = RG_PHY_SPEED_3_125G;
+ else
+ rgc3 = 0;
+
+ advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
+ advertising);
+ if (advertise < 0)
+ return advertise;
+
+ link_timer = phylink_get_link_timer_ns(interface);
+ if (link_timer < 0)
+ return link_timer;
+
+ /* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
+ * we assume that fixes its speed at bitrate = line rate (in
+ * other words, 1000Mbps or 2500Mbps).
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ sgm_mode = SGMII_IF_MODE_SGMII;
+ if (phylink_autoneg_inband(mode)) {
+ sgm_mode |= SGMII_REMOTE_FAULT_DIS |
+ SGMII_SPEED_DUPLEX_AN;
+ use_an = true;
+ } else {
+ use_an = false;
+ }
+ } else if (phylink_autoneg_inband(mode)) {
+ /* 1000base-X or 2500base-X autoneg */
+ sgm_mode = SGMII_REMOTE_FAULT_DIS;
+ use_an = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ } else {
+ /* 1000base-X or 2500base-X without autoneg */
+ sgm_mode = 0;
+ use_an = false;
+ }
- /* Disable SGMII AN */
- regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
- val &= ~SGMII_AN_ENABLE;
- regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
+ if (use_an) {
+ /* FIXME: Do we need to set AN_RESTART here? */
+ bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE;
+ } else {
+ bmcr = 0;
+ }
- /* Set the speed etc but leave the duplex unchanged */
- regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
- val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK;
- val |= SGMII_SPEED_1000;
- regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
+ /* Configure the underlying interface speed */
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);
- /* Release PHYA power down state */
- regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ /* Update the advertisement, noting whether it has changed */
+ regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
+ SGMII_ADVERTISE, advertise, &changed);
- return 0;
-}
+ /* Setup the link timer and QPHY power up inside SGMIISYS */
+ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
-static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertising,
- bool permit_pause_to_mac)
-{
- struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- int err = 0;
+ /* Update the sgmsys mode register */
+ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
+ SGMII_IF_MODE_SGMII, sgm_mode);
+
+ /* Update the BMCR */
+ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+ SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
- /* Setup SGMIISYS with the determined property */
- if (interface != PHY_INTERFACE_MODE_SGMII)
- err = mtk_pcs_setup_mode_force(mpcs, interface);
- else if (phylink_autoneg_inband(mode))
- err = mtk_pcs_setup_mode_an(mpcs);
+ /* Release PHYA power down state */
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, 0);
- return err;
+ return changed;
}
static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- unsigned int val;
- regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
+ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+ SGMII_AN_RESTART, SGMII_AN_RESTART);
}
static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed, int duplex)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- unsigned int val;
-
- if (!phy_interface_mode_is_8023z(interface))
- return;
-
- /* SGMII force duplex setting */
- regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
- val &= ~SGMII_DUPLEX_FULL;
- if (duplex == DUPLEX_FULL)
- val |= SGMII_DUPLEX_FULL;
-
- regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
+ unsigned int sgm_mode;
+
+ if (!phylink_autoneg_inband(mode)) {
+ /* Force the speed and duplex setting */
+ if (speed == SPEED_10)
+ sgm_mode = SGMII_SPEED_10;
+ else if (speed == SPEED_100)
+ sgm_mode = SGMII_SPEED_100;
+ else
+ sgm_mode = SGMII_SPEED_1000;
+
+ if (duplex == DUPLEX_FULL)
+ sgm_mode |= SGMII_DUPLEX_FULL;
+
+ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ SGMII_DUPLEX_FULL | SGMII_SPEED_MASK,
+ sgm_mode);
+ }
}
static const struct phylink_pcs_ops mtk_pcs_ops = {
+ .pcs_get_state = mtk_pcs_get_state,
.pcs_config = mtk_pcs_config,
.pcs_an_restart = mtk_pcs_restart_an,
.pcs_link_up = mtk_pcs_link_up,
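Side note (not part of the patch): the link-timer write above, link_timer / 2 / 8, follows from the register comment in mtk_eth_soc.h stating that SGMSYS_PCS_LINK_TIMER counts in units of 2 * 8 ns. Assuming phylink_get_link_timer_ns() returns 1.6 ms for SGMII, the arithmetic reproduces the SGMII_LINK_TIMER_DEFAULT constant kept in the header; the stand-alone sketch below just checks that arithmetic and is not driver code.

#include <stdio.h>

int main(void)
{
	unsigned int link_timer_ns = 1600000;	/* assumed 1.6 ms SGMII link timer */
	unsigned int reg = link_timer_ns / 2 / 8;	/* register unit is 2 * 8 ns */

	printf("0x%x\n", reg);	/* prints 0x186a0, matching SGMII_LINK_TIMER_DEFAULT */
	return 0;
}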
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 65e01bf4b4d2..06b6cc53fa02 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -9,6 +9,7 @@
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
@@ -16,12 +17,14 @@
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
+#include "mtk_wed_wo.h"
#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
#define MTK_WED_PKT_SIZE 1900
#define MTK_WED_BUF_SIZE 2048
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+#define MTK_WED_RX_RING_SIZE 1536
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
@@ -30,6 +33,10 @@
#define MTK_WED_PER_GROUP_PKT 128
#define MTK_WED_FBUF_SIZE 128
+#define MTK_WED_MIOD_CNT 16
+#define MTK_WED_FB_CMD_CNT 1024
+#define MTK_WED_RRO_QUE_CNT 8192
+#define MTK_WED_MIOD_ENTRY_CNT 128
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);
@@ -64,12 +71,81 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
wdma_m32(dev, reg, 0, mask);
}
+static void
+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, mask, 0);
+}
+
+static u32
+wifi_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->wlan.base + reg);
+}
+
+static void
+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->wlan.base + reg);
+}
+
static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
return wed_r32(dev, MTK_WED_RESET);
}
+static u32
+mtk_wdma_read_reset(struct mtk_wed_device *dev)
+{
+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
+}
+
+static int
+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+{
+ u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
+ int i, ret;
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
+ ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ !(status & mask), 0, 10000);
+ if (ret)
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
+ if (dev->rx_wdma[i].desc)
+ continue;
+
+ wdma_w32(dev,
+ MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
+ }
+
+ return ret;
+}
+
+static void
+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+{
+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
+ int i;
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ !(status & mask), 0, 10000))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ wdma_w32(dev,
+ MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
+}
+
static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
@@ -81,6 +157,54 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
WARN_ON_ONCE(1);
}
+static u32
+mtk_wed_wo_read_status(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
+}
+
+static void
+mtk_wed_wo_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 state = MTK_WED_WO_STATE_DISABLE;
+ void __iomem *reg;
+ u32 val;
+
+ mtk_wdma_tx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &state,
+ sizeof(state), false);
+
+ if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
+ val == MTK_WED_WOIF_DISABLE_DONE,
+ 100, MTK_WOCPU_TIMEOUT))
+ dev_err(dev->hw->dev, "failed to disable wed-wo\n");
+
+ reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);
+
+ val = readl(reg);
+ switch (dev->hw->index) {
+ case 0:
+ val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ break;
+ case 1:
+ val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ break;
+ default:
+ break;
+ }
+ iounmap(reg);
+}
+
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
@@ -115,7 +239,7 @@ out:
}
static int
-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
@@ -132,16 +256,16 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
if (!page_list)
return -ENOMEM;
- dev->buf_ring.size = ring_size;
- dev->buf_ring.pages = page_list;
+ dev->tx_buf_ring.size = ring_size;
+ dev->tx_buf_ring.pages = page_list;
desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
&desc_phys, GFP_KERNEL);
if (!desc)
return -ENOMEM;
- dev->buf_ring.desc = desc;
- dev->buf_ring.desc_phys = desc_phys;
+ dev->tx_buf_ring.desc = desc;
+ dev->tx_buf_ring.desc_phys = desc_phys;
for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
@@ -202,10 +326,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
}
static void
-mtk_wed_free_buffer(struct mtk_wed_device *dev)
+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
- struct mtk_wdma_desc *desc = dev->buf_ring.desc;
- void **page_list = dev->buf_ring.pages;
+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+ void **page_list = dev->tx_buf_ring.pages;
int page_idx;
int i;
@@ -215,7 +339,8 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
if (!desc)
goto free_pagelist;
- for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
+ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+ i += MTK_WED_BUF_PER_PAGE) {
void *page = page_list[page_idx++];
dma_addr_t buf_addr;
@@ -228,13 +353,59 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
__free_page(page);
}
- dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
- desc, dev->buf_ring.desc_phys);
+ dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
+ desc, dev->tx_buf_ring.desc_phys);
free_pagelist:
kfree(page_list);
}
+static int
+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_rxbm_desc *desc;
+ dma_addr_t desc_phys;
+
+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
+ desc = dma_alloc_coherent(dev->hw->dev,
+ dev->wlan.rx_nbuf * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->rx_buf_ring.desc = desc;
+ dev->rx_buf_ring.desc_phys = desc_phys;
+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
+
+ return 0;
+}
+
+static void
+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
+
+ if (!desc)
+ return;
+
+ dev->wlan.release_rx_buf(dev);
+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
+ desc, dev->rx_buf_ring.desc_phys);
+}
+
+static void
+mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+}
+
static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
@@ -246,14 +417,21 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
}
static void
+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
+{
+ mtk_wed_free_rx_buffer(dev);
+ mtk_wed_free_ring(dev, &dev->rro.ring);
+}
+
+static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
mtk_wed_free_ring(dev, &dev->tx_ring[i]);
- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
- mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+ mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}
static void
@@ -290,6 +468,38 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
}
}
+#define MTK_WFMDA_RX_DMA_EN BIT(2)
+static void
+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
+{
+ u32 val;
+ int i;
+
+ if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
+ return; /* queue is not configured by mt76 */
+
+ for (i = 0; i < 3; i++) {
+ u32 cur_idx;
+
+ cur_idx = wed_r32(dev,
+ MTK_WED_WPDMA_RING_RX_DATA(idx) +
+ MTK_WED_RING_OFS_CPU_IDX);
+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
+ break;
+
+ usleep_range(100000, 200000);
+ }
+
+ if (i == 3) {
+ dev_err(dev->hw->dev, "rx dma enable failed\n");
+ return;
+ }
+
+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
+ MTK_WFMDA_RX_DMA_EN;
+ wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
+}
+
static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
@@ -303,40 +513,66 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
- wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_TX_DMA_EN |
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
if (dev->hw->version == 1) {
regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
- wdma_m32(dev, MTK_WDMA_GLO_CFG,
- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
- mtk_wed_set_512_support(dev, false);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
}
+
+ mtk_wed_set_512_support(dev, false);
}
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
- mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
- wed_clr(dev, MTK_WED_CTRL,
- MTK_WED_CTRL_WDMA_INT_AGENT_EN |
- MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
- MTK_WED_CTRL_WED_TX_BM_EN |
- MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+ if (dev->hw->version == 1)
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
+}
+
+static void
+mtk_wed_deinit(struct mtk_wed_device *dev)
+{
+ mtk_wed_stop(dev);
+ mtk_wed_dma_disable(dev);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ if (dev->hw->version == 1)
+ return;
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ MTK_WED_CTRL_WED_RX_BM_EN |
+ MTK_WED_CTRL_RX_RRO_QM_EN);
}
static void
@@ -346,16 +582,19 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mutex_lock(&hw_lock);
- mtk_wed_stop(dev);
-
- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+ mtk_wed_deinit(dev);
+ mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
-
- mtk_wed_free_buffer(dev);
+ mtk_wed_free_tx_buffer(dev);
mtk_wed_free_tx_rings(dev);
+ if (mtk_wed_get_rx_capa(dev)) {
+ mtk_wed_wo_reset(dev);
+ mtk_wed_free_rx_rings(dev);
+ mtk_wed_wo_deinit(hw);
+ }
+
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
@@ -431,10 +670,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
} else {
mtk_wed_bus_init(dev);
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
- wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
}
}
@@ -443,7 +684,7 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
u32 mask, set;
- mtk_wed_stop(dev);
+ mtk_wed_deinit(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_set_wpdma(dev);
@@ -484,6 +725,132 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
}
}
+static int
+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev,
+ size * sizeof(*ring->desc),
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_size = sizeof(*ring->desc);
+ ring->size = size;
+ memset(ring->desc, 0, size * sizeof(*ring->desc));
+
+ return 0;
+}
+
+#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
+static int
+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
+{
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ int index;
+
+ index = of_property_match_string(dev->hw->node, "memory-region-names",
+ "wo-dlm");
+ if (index < 0)
+ return index;
+
+ np = of_parse_phandle(dev->hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ dev->rro.miod_phys = rmem->base;
+ dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
+
+ return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
+ MTK_WED_RRO_QUE_CNT);
+}
+
+static int
+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ struct {
+ struct {
+ __le32 base;
+ __le32 cnt;
+ __le32 unit;
+ } ring[2];
+ __le32 wed;
+ u8 version;
+ } req = {
+ .ring[0] = {
+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
+ .cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
+ .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
+ },
+ .ring[1] = {
+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
+ MTK_WED_MIOD_COUNT),
+ .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
+ .unit = cpu_to_le32(4),
+ },
+ };
+
+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_WED_CFG,
+ &req, sizeof(req), true);
+}
+
+static void
+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
+ MTK_WED_MIOD_ENTRY_CNT >> 2));
+
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);
+
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+}
+
+static void
+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
+
+ for (;;) {
+ usleep_range(100, 200);
+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
+ break;
+ }
+
+ /* configure RX_ROUTE_QM */
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ /* enable RX_ROUTE_QM */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+}
+
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
@@ -495,11 +862,11 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
- dev->buf_ring.size / 128) |
+ dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
MTK_WED_TX_RING_SIZE / 256));
- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
@@ -526,9 +893,9 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
- dev->buf_ring.size / 128) |
+ dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
- dev->buf_ring.size / 128));
+ dev->tx_buf_ring.size / 128));
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
@@ -536,18 +903,28 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
- if (dev->hw->version == 1)
+ if (dev->hw->version == 1) {
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- else
+ } else {
wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ /* rx hw init */
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+
+ mtk_wed_rx_buffer_hw_init(dev);
+ mtk_wed_rro_hw_init(dev);
+ mtk_wed_route_qm_hw_init(dev);
+ }
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}
static void
-mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
void *head = (void *)ring->desc;
int i;
@@ -557,49 +934,140 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
desc->buf0 = 0;
- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ if (tx)
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ else
+ desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
desc->buf1 = 0;
desc->info = 0;
}
}
static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev)
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
- if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
- MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
- return true;
-
- if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_CTRL) &
- (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
- return true;
-
- return false;
+ return !!(wed_r32(dev, reg) & mask);
}
static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev)
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
int sleep = 15000;
int timeout = 100 * sleep;
u32 val;
return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
- timeout, false, dev);
+ timeout, false, dev, reg, mask);
+}
+
+static int
+mtk_wed_rx_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 val = MTK_WED_WO_STATE_SER_RESET;
+ int i, ret;
+
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ }
+
+ /* reset rro qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_RRO_QM_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
+ } else {
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+ if (ret)
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ else
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ MTK_WED_RTQM_Q_RST);
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+ ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_RX_DMA_BUSY);
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+ struct mtk_eth *eth = dev->hw->eth;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ wed_set(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_RX_V2);
+ else
+ wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ /* reset rx bm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
+ /* switch the WO MCU back to the enable state */
+ val = MTK_WED_WO_STATE_ENABLE;
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ /* wed_rx_ring_reset */
+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
+ if (!dev->rx_ring[i].desc)
+ continue;
+
+ mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
+ false);
+ }
+ mtk_wed_free_rx_buffer(dev);
+
+ return 0;
}
static void
@@ -613,23 +1081,27 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
if (!dev->tx_ring[i].desc)
continue;
- mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
+ true);
}
- if (mtk_wed_poll_busy(dev))
- busy = mtk_wed_check_busy(dev);
-
+ /* 1. reset WED tx DMA */
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
} else {
- wed_w32(dev, MTK_WED_RESET_IDX,
- MTK_WED_RESET_IDX_TX |
- MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+ /* 2. reset WDMA rx DMA */
+ busy = !!mtk_wdma_rx_reset(dev);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
@@ -646,6 +1118,9 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
}
+ /* 3. reset WED WPDMA tx */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
for (i = 0; i < 100; i++) {
val = wed_r32(dev, MTK_WED_TX_BM_INTF);
if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
@@ -653,8 +1128,19 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
}
mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+ /* 4. reset WED WPDMA tx */
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
+
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
@@ -666,11 +1152,21 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
+ dev->init_done = false;
+ if (dev->hw->version == 1)
+ return;
+
+ if (!busy) {
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ mtk_wed_rx_reset(dev);
}
static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
- int size, u32 desc_size)
+ int size, u32 desc_size, bool tx)
{
ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
@@ -679,18 +1175,24 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
ring->desc_size = desc_size;
ring->size = size;
- mtk_wed_ring_reset(ring, size);
+ mtk_wed_ring_reset(ring, size, tx);
return 0;
}
static int
-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
- struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->rx_wdma))
+ return -EINVAL;
- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
+ wdma = &dev->rx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -707,6 +1209,64 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
return 0;
}
+static int
+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->tx_wdma))
+ return -EINVAL;
+
+ wdma = &dev->tx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
+
+ if (reset)
+ mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);
+
+ if (!idx) {
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
+ size);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
+ 0);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
+ 0);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
+ u32 reason, u32 hash)
+{
+ struct mtk_eth *eth = dev->hw->eth;
+ struct ethhdr *eh;
+
+ if (!skb)
+ return;
+
+ if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ return;
+
+ skb_set_mac_header(skb, 0);
+ eh = eth_hdr(skb);
+ skb->protocol = eh->h_proto;
+ mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
+}
+
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
@@ -729,6 +1289,8 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+ GENMASK(1, 0));
/* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
@@ -747,6 +1309,16 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+ dev->wlan.rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+ dev->wlan.rx_tbit[1]));
+
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
@@ -784,9 +1356,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
+ int i;
+
wed_set(dev, MTK_WED_WPDMA_CTRL,
MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
@@ -794,6 +1372,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+ 0x2));
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+ mtk_wed_check_wfdma_rx_fill(dev, i);
}
}
@@ -802,9 +1389,12 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
int i;
- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
- if (!dev->tx_wdma[i].desc)
- mtk_wed_wdma_ring_setup(dev, i, 16);
+ if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+ if (!dev->rx_wdma[i].desc)
+ mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
@@ -819,9 +1409,22 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
} else {
- mtk_wed_set_512_support(dev, true);
+ /* the driver sets the WPDMA MID_RDY masks only once */
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+
+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+
+ if (mtk_wed_rro_cfg(dev))
+ return;
+
}
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+
mtk_wed_dma_enable(dev);
dev->running = true;
}
@@ -853,7 +1456,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
if (!hw) {
module_put(THIS_MODULE);
ret = -ENODEV;
- goto out;
+ goto unlock;
}
device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
@@ -866,30 +1469,42 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->dev = hw->dev;
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
+ dev->version = hw->version;
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
mtk_eth_set_dma_device(hw->eth, hw->dev);
- ret = mtk_wed_buffer_alloc(dev);
- if (ret) {
- mtk_wed_detach(dev);
+ ret = mtk_wed_tx_buffer_alloc(dev);
+ if (ret)
goto out;
+
+ if (mtk_wed_get_rx_capa(dev)) {
+ ret = mtk_wed_rro_alloc(dev);
+ if (ret)
+ goto out;
}
mtk_wed_hw_init_early(dev);
- if (hw->hifsys)
+ if (hw->version == 1) {
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), 0);
-
+ } else {
+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
+ ret = mtk_wed_wo_init(hw);
+ }
out:
+ if (ret)
+ mtk_wed_detach(dev);
+unlock:
mutex_unlock(&hw_lock);
return ret;
}
static int
-mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
{
struct mtk_wed_ring *ring = &dev->tx_ring[idx];
@@ -905,13 +1520,15 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
* WDMA RX.
*/
- BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
+ if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
+ return -EINVAL;
- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
- sizeof(*ring->desc)))
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ sizeof(*ring->desc), true))
return -ENOMEM;
- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
return -ENOMEM;
ring->reg_base = MTK_WED_RING_TX(idx);
@@ -955,6 +1572,39 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
return 0;
}
+static int
+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
+{
+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
+
+ if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
+ return -EINVAL;
+
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
+ sizeof(*ring->desc), false))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
+ ring->wpdma = regs;
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+
+ /* WPDMA -> WED */
+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_RX_RING_SIZE);
+
+ return 0;
+}
+
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
@@ -1051,7 +1701,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
.tx_ring_setup = mtk_wed_tx_ring_setup,
+ .rx_ring_setup = mtk_wed_rx_ring_setup,
.txfree_ring_setup = mtk_wed_txfree_ring_setup,
+ .msg_update = mtk_wed_mcu_msg_update,
.start = mtk_wed_start,
.stop = mtk_wed_stop,
.reset_dma = mtk_wed_reset_dma,
@@ -1060,6 +1712,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.irq_get = mtk_wed_irq_get,
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
+ .ppe_check = mtk_wed_ppe_check,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
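Side note (not part of the patch): the reset rework above replaces the open-coded busy checks with a single mtk_wed_poll_busy(dev, reg, mask) helper built on read_poll_timeout(). The stand-alone sketch below is a rough user-space analog of that poll-until-clear loop; fake_read() and its constants are invented for illustration and do not correspond to real WED registers.

#include <stdio.h>
#include <unistd.h>

/* fake register read standing in for wed_r32(dev, reg) */
static unsigned int fake_read(unsigned int reg)
{
	static int countdown = 3;

	(void)reg;
	return countdown-- > 0 ? 0x1 : 0x0;	/* busy bit clears after a few polls */
}

/* poll until (read(reg) & mask) == 0, or give up after max_polls tries */
static int poll_busy(unsigned int reg, unsigned int mask, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (!(fake_read(reg) & mask))
			return 0;
		usleep(15000);	/* mirrors the 15 ms sleep step in mtk_wed_poll_busy() */
	}
	return -1;	/* timed out; the driver falls back to a hard block reset */
}

int main(void)
{
	printf("%d\n", poll_busy(0x0, 0x1, 100));	/* prints 0 once the bit clears */
	return 0;
}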
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index ae420ca01a48..e012b8a82133 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
struct mtk_eth;
+struct mtk_wed_wo;
struct mtk_wed_hw {
struct device_node *node;
@@ -22,6 +23,7 @@ struct mtk_wed_hw {
struct regmap *mirror;
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
u32 debugfs_reg;
u32 num_flows;
u8 version;
@@ -85,6 +87,24 @@ wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
}
static inline u32
+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->rx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->rx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->rx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->rx_ring[ring].wpdma + reg);
+}
+
+static inline u32
wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
{
if (!dev->txfree_ring.wpdma)
@@ -126,6 +146,7 @@ static inline int mtk_wed_flow_add(int index)
static inline void mtk_wed_flow_remove(int index)
{
}
+
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index f420f187e837..56f663439721 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
#include <linux/seq_file.h>
+#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_wed.h"
#include "mtk_wed_regs.h"
@@ -18,6 +19,8 @@ enum {
DUMP_TYPE_WDMA,
DUMP_TYPE_WPDMA_TX,
DUMP_TYPE_WPDMA_TXFREE,
+ DUMP_TYPE_WPDMA_RX,
+ DUMP_TYPE_WED_RRO,
};
#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
@@ -36,6 +39,9 @@ enum {
#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
static void
print_reg_val(struct seq_file *s, const char *name, u32 val)
@@ -57,6 +63,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
cur > regs ? "\n" : "",
cur->name);
continue;
+ case DUMP_TYPE_WED_RRO:
case DUMP_TYPE_WED:
val = wed_r32(dev, cur->offset);
break;
@@ -69,6 +76,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
case DUMP_TYPE_WPDMA_TXFREE:
val = wpdma_txfree_r32(dev, cur->offset);
break;
+ case DUMP_TYPE_WPDMA_RX:
+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
+ break;
}
print_reg_val(s, cur->name, val);
}
@@ -132,6 +142,80 @@ wed_txinfo_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+static int
+wed_rxinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WPDMA RX"),
+ DUMP_WPDMA_RX_RING(0),
+ DUMP_WPDMA_RX_RING(1),
+
+ DUMP_STR("WPDMA RX"),
+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
+
+ DUMP_STR("WED RX"),
+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
+
+ DUMP_STR("WED RRO"),
+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+ DUMP_WED(WED_RROQM_MID_MIB),
+ DUMP_WED(WED_RROQM_MOD_MIB),
+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2N_MIB),
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+
+ DUMP_STR("WED WDMA TX"),
+ DUMP_WED(WED_WDMA_TX_MIB),
+ DUMP_WED_RING(WED_WDMA_RING_TX),
+
+ DUMP_STR("WDMA TX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
+
+ DUMP_STR("WED RX BM"),
+ DUMP_WED(WED_RX_BM_BASE),
+ DUMP_WED(WED_RX_BM_RX_DMAD),
+ DUMP_WED(WED_RX_BM_PTR),
+ DUMP_WED(WED_RX_BM_TKID_MIB),
+ DUMP_WED(WED_RX_BM_BLEN),
+ DUMP_WED(WED_RX_BM_STS),
+ DUMP_WED(WED_RX_BM_INTF2),
+ DUMP_WED(WED_RX_BM_INTF),
+ DUMP_WED(WED_RX_BM_ERR_STS),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev)
+ return 0;
+
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
static int
mtk_wed_reg_set(void *data, u64 val)
@@ -175,4 +259,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+ if (hw->version != 1)
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
new file mode 100644
index 000000000000..f9539e6233c9
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/mfd/syscon.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <asm/unaligned.h>
+
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+#include "mtk_wed.h"
+
+static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+ return readl(wo->boot.addr + reg);
+}
+
+static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+ writel(val, wo->boot.addr + reg);
+}
+
+static struct sk_buff *
+mtk_wed_mcu_msg_alloc(const void *data, int data_len)
+{
+ int length = sizeof(struct mtk_wed_mcu_hdr) + data_len;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memset(skb->head, 0, length);
+ skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr));
+ if (data && data_len)
+ skb_put_data(skb, data, data_len);
+
+ return skb;
+}
+
+static struct sk_buff *
+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
+{
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q),
+ expires - jiffies);
+ return skb_dequeue(&wo->mcu.res_q);
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+ skb_queue_tail(&wo->mcu.res_q, skb);
+ wake_up(&wo->mcu.wait);
+}
+
+static void
+mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
+{
+ u32 count = get_unaligned_le32(skb->data);
+ struct mtk_wed_wo_rx_stats *stats;
+ int i;
+
+ if (count * sizeof(*stats) > skb->len - sizeof(u32))
+ return;
+
+ stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32));
+ for (i = 0 ; i < count ; i++)
+ wed->wlan.update_wo_rx_stats(wed, &stats[i]);
+}
+
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+ struct sk_buff *skb)
+{
+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+ skb_pull(skb, sizeof(*hdr));
+
+ switch (hdr->cmd) {
+ case MTK_WED_WO_EVT_LOG_DUMP:
+ dev_notice(wo->hw->dev, "%s\n", skb->data);
+ break;
+ case MTK_WED_WO_EVT_PROFILING: {
+ struct mtk_wed_wo_log_info *info = (void *)skb->data;
+ u32 count = skb->len / sizeof(*info);
+ int i;
+
+ for (i = 0 ; i < count ; i++)
+ dev_notice(wo->hw->dev,
+ "SN:%u latency: total=%u, rro:%u, mod:%u\n",
+ le32_to_cpu(info[i].sn),
+ le32_to_cpu(info[i].total),
+ le32_to_cpu(info[i].rro),
+ le32_to_cpu(info[i].mod));
+ break;
+ }
+ case MTK_WED_WO_EVT_RXCNT_INFO:
+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
+ break;
+ default:
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static int
+mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb,
+ int id, int cmd, u16 *wait_seq, bool wait_resp)
+{
+ struct mtk_wed_mcu_hdr *hdr;
+
+ /* TODO: make it dynamic based on cmd */
+ wo->mcu.timeout = 20 * HZ;
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr));
+ hdr->cmd = cmd;
+ hdr->length = cpu_to_le16(skb->len);
+
+ if (wait_resp && wait_seq) {
+ u16 seq = ++wo->mcu.seq;
+
+ if (!seq)
+ seq = ++wo->mcu.seq;
+ *wait_seq = seq;
+
+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP);
+ hdr->seq = cpu_to_le16(seq);
+ }
+ if (id == MTK_WED_MODULE_ID_WO)
+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
+
+ return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
+}
+
+static int
+mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb,
+ int cmd, int seq)
+{
+ struct mtk_wed_mcu_hdr *hdr;
+
+ if (!skb) {
+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+ if (le16_to_cpu(hdr->seq) != seq)
+ return -EAGAIN;
+
+ skb_pull(skb, sizeof(*hdr));
+ switch (cmd) {
+ case MTK_WED_WO_CMD_RXCNT_INFO:
+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+ const void *data, int len, bool wait_resp)
+{
+ unsigned long expires;
+ struct sk_buff *skb;
+ u16 seq;
+ int ret;
+
+ skb = mtk_wed_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&wo->mcu.mutex);
+
+ ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp);
+ if (ret || !wait_resp)
+ goto unlock;
+
+ expires = jiffies + wo->mcu.timeout;
+ do {
+ skb = mtk_wed_mcu_get_response(wo, expires);
+ ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq);
+ dev_kfree_skb(skb);
+ } while (ret == -EAGAIN);
+
+unlock:
+ mutex_unlock(&wo->mcu.mutex);
+
+ return ret;
+}
+
+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
+ int len)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+ if (dev->hw->version == 1)
+ return 0;
+
+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len,
+ true);
+}
+
+static int
+mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+ struct mtk_wed_wo_memory_region *region)
+{
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ int index;
+
+ index = of_property_match_string(wo->hw->node, "memory-region-names",
+ region->name);
+ if (index < 0)
+ return index;
+
+ np = of_parse_phandle(wo->hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ region->phy_addr = rmem->base;
+ region->size = rmem->size;
+ region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
+
+ return !region->addr ? -EINVAL : 0;
+}
+
+static int
+mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
+ struct mtk_wed_wo_memory_region *region)
+{
+ const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct mtk_wed_fw_region *fw_region;
+
+ trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+ first_region_ptr = region_ptr;
+
+ while (region_ptr < trailer_ptr) {
+ u32 length;
+
+ fw_region = (const struct mtk_wed_fw_region *)region_ptr;
+ length = le32_to_cpu(fw_region->len);
+
+ if (region->phy_addr != le32_to_cpu(fw_region->addr))
+ goto next;
+
+ if (region->size < length)
+ goto next;
+
+ if (first_region_ptr < ptr + length)
+ goto next;
+
+ if (region->shared && region->consumed)
+ return 0;
+
+ if (!region->shared || !region->consumed) {
+ memcpy_toio(region->addr, ptr, length);
+ region->consumed = true;
+ return 0;
+ }
+next:
+ region_ptr += sizeof(*fw_region);
+ ptr += length;
+ }
+
+ return -EINVAL;
+}
+
+static int
+mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+{
+ static struct mtk_wed_wo_memory_region mem_region[] = {
+ [MTK_WED_WO_REGION_EMI] = {
+ .name = "wo-emi",
+ },
+ [MTK_WED_WO_REGION_ILM] = {
+ .name = "wo-ilm",
+ },
+ [MTK_WED_WO_REGION_DATA] = {
+ .name = "wo-data",
+ .shared = true,
+ },
+ };
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct firmware *fw;
+ const char *fw_name;
+ u32 val, boot_cr;
+ int ret, i;
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
+ if (ret)
+ return ret;
+ }
+
+ wo->boot.name = "wo-boot";
+ ret = mtk_wed_get_memory_region(wo, &wo->boot);
+ if (ret)
+ return ret;
+
+ /* set dummy cr */
+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+ wo->hw->index + 1);
+
+ /* load firmware */
+ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
+ ret = request_firmware(&fw, fw_name, wo->hw->dev);
+ if (ret)
+ return ret;
+
+ trailer = (void *)(fw->data + fw->size -
+ sizeof(struct mtk_wed_fw_trailer));
+ dev_info(wo->hw->dev,
+ "MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n",
+ trailer->fw_ver, trailer->build_date);
+ dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
+ trailer->chip_id, trailer->num_region);
+
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* set the start address */
+ boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
+ : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ /* wo firmware reset */
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+
+ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+ val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
+ : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static u32
+mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo)
+{
+ return wed_r32(wo->hw->wed_dev,
+ MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL);
+}
+
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
+{
+ u32 val;
+ int ret;
+
+ skb_queue_head_init(&wo->mcu.res_q);
+ init_waitqueue_head(&wo->mcu.wait);
+ mutex_init(&wo->mcu.mutex);
+
+ ret = mtk_wed_mcu_load_firmware(wo);
+ if (ret)
+ return ret;
+
+ return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val,
+ 100, MTK_FW_DL_TIMEOUT);
+}
+
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
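
The loader above relies on a fixed image layout: region payloads are packed back to back at the start of the file, followed by one mtk_wed_fw_region descriptor per region, with a mtk_wed_fw_trailer at the very end. A standalone sketch of walking that layout, shown only to make the pointer arithmetic in mtk_wed_mcu_run_firmware() explicit (hypothetical helper, not part of the driver):

	/* Hypothetical dump helper; mirrors the pointer arithmetic used by
	 * mtk_wed_mcu_run_firmware() above, nothing more.
	 */
	static void example_dump_wo_fw_layout(const struct firmware *fw)
	{
		const struct mtk_wed_fw_trailer *trailer;
		const struct mtk_wed_fw_region *region;
		const u8 *payload = fw->data;
		int i;

		trailer = (const void *)(fw->data + fw->size - sizeof(*trailer));
		region = (const void *)((const u8 *)trailer -
					trailer->num_region * sizeof(*region));

		for (i = 0; i < trailer->num_region; i++, region++) {
			pr_info("region %d: dest 0x%08x len %u at offset %zu\n",
				i, le32_to_cpu(region->addr),
				le32_to_cpu(region->len),
				(size_t)(payload - fw->data));
			payload += le32_to_cpu(region->len);
		}
	}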
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index e270fb336143..0a50bb98c5ea 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -4,6 +4,7 @@
#ifndef __MTK_WED_REGS_H
#define __MTK_WED_REGS_H
+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
@@ -19,15 +20,23 @@ struct mtk_wdma_desc {
__le32 info;
} __packed __aligned(4);
+#define MTK_WED_REV_ID 0x004
+
#define MTK_WED_RESET 0x008
#define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_RX_BM BIT(1)
#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
#define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WED_RX_DMA BIT(13)
+#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
#define MTK_WED_RESET_WED BIT(31)
#define MTK_WED_CTRL 0x00c
@@ -39,8 +48,12 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
@@ -62,6 +75,9 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_GET_BM_DMAD_SKIP BIT(25)
+#define MTK_WED_EXT_INT_STATUS_WPDMA_RX_D_DRV_ERR BIT(26)
+#define MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY BIT(27)
#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
@@ -71,6 +87,8 @@ struct mtk_wdma_desc {
MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
#define MTK_WED_EXT_INT_MASK 0x028
+#define MTK_WED_EXT_INT_MASK1 0x02c
+#define MTK_WED_EXT_INT_MASK2 0x030
#define MTK_WED_STATUS 0x060
#define MTK_WED_STATUS_TX GENMASK(15, 8)
@@ -144,6 +162,8 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_IDX 0x20c
#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
+#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
@@ -151,7 +171,9 @@ struct mtk_wdma_desc {
#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
+#define MTK_WED_SCR0 0x3c0
#define MTK_WED_WPDMA_INT_TRIGGER 0x504
#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
@@ -212,6 +234,12 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
@@ -241,11 +269,37 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
+
+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
+
+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+#define MTK_WED_WPDMA_RX_RING 0x770
+
+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+
+#define MTK_WED_WDMA_RING_TX 0x800
+
+#define MTK_WED_WDMA_TX_MIB 0x810
+
#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
#define MTK_WED_WDMA_GLO_CFG 0xa04
#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
@@ -290,6 +344,20 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
+#define MTK_WED_RX_BM_RX_DMAD 0xd80
+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
+
+#define MTK_WED_RX_BM_BASE 0xd84
+#define MTK_WED_RX_BM_INIT_PTR 0xd88
+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
+
+#define MTK_WED_RX_PTR 0xd8c
+
+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
+
#define MTK_WED_RING_OFS_BASE 0x00
#define MTK_WED_RING_OFS_COUNT 0x04
#define MTK_WED_RING_OFS_CPU_IDX 0x08
@@ -300,7 +368,9 @@ struct mtk_wdma_desc {
#define MTK_WDMA_GLO_CFG 0x204
#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
@@ -329,4 +399,70 @@ struct mtk_wdma_desc {
/* DMA channel mapping */
#define HIFSYS_DMA_AG_MAP 0x008
+#define MTK_WED_RTQM_GLO_CFG 0xb00
+#define MTK_WED_RTQM_BUSY BIT(1)
+#define MTK_WED_RTQM_Q_RST BIT(2)
+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
+#define MTK_WED_RTQM_Q2N_MIB 0xb80
+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
+
+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
+
+#define MTK_WED_RROQM_GLO_CFG 0xc04
+#define MTK_WED_RROQM_RST_IDX 0xc08
+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
+
+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
+
+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
+
+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
+
+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
+
+#define MTK_WED_RROQ_BASE_L 0xc80
+#define MTK_WED_RROQ_BASE_H 0xc84
+
+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
+
+#define MTK_WED_RROQM_MID_MIB 0xcc0
+#define MTK_WED_RROQM_MOD_MIB 0xcc4
+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
+
+#define MTK_WED_RX_BM_RX_DMAD 0xd80
+#define MTK_WED_RX_BM_BASE 0xd84
+#define MTK_WED_RX_BM_INIT_PTR 0xd88
+#define MTK_WED_RX_BM_PTR 0xd8c
+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
+
+#define MTK_WED_RX_BM_BLEN 0xd90
+#define MTK_WED_RX_BM_STS 0xd94
+#define MTK_WED_RX_BM_INTF2 0xd98
+#define MTK_WED_RX_BM_INTF 0xd9c
+#define MTK_WED_RX_BM_ERR_STS 0xda8
+
+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+#define MTK_WED_PCIE_INT_MASK 0x0
+
#endif
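
The new RX definitions follow the file's existing convention of pairing a register offset with GENMASK()/BIT() field masks, so values are composed with FIELD_PREP() rather than open-coded shifts. A small sketch, assuming dev is the struct mtk_wed_device handle that wed_w32() takes elsewhere in the driver and using made-up threshold values:

	/* Hedged sketch: program the RX buffer-manager dynamic allocation
	 * thresholds from the field masks declared above. The numeric
	 * thresholds are illustrative only.
	 */
	wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff) |
		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_L, 0x80));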
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
new file mode 100644
index 000000000000..a0a39643caf7
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+
+static u32
+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+ u32 val;
+
+ if (regmap_read(wo->mmio.regs, reg, &val))
+ val = ~0;
+
+ return val;
+}
+
+static void
+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+ regmap_write(wo->mmio.regs, reg, val);
+}
+
+static u32
+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
+{
+ u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
+
+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
+}
+
+static void
+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
+}
+
+static void
+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
+}
+
+static void
+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wo->mmio.lock, flags);
+ wo->mmio.irq_mask &= ~mask;
+ wo->mmio.irq_mask |= val;
+ if (set)
+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+ spin_unlock_irqrestore(&wo->mmio.lock, flags);
+}
+
+static void
+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
+ tasklet_schedule(&wo->mmio.irq_tasklet);
+}
+
+static void
+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
+}
+
+static void
+mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
+}
+
+static void
+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ u32 val)
+{
+ wmb();
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
+}
+
+static void *
+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
+ bool flush)
+{
+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+ int index = (q->tail + 1) % q->n_desc;
+ struct mtk_wed_wo_queue_entry *entry;
+ struct mtk_wed_wo_queue_desc *desc;
+ void *buf;
+
+ if (!q->queued)
+ return NULL;
+
+ if (flush)
+ q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
+ else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
+ return NULL;
+
+ q->tail = index;
+ q->queued--;
+
+ desc = &q->desc[index];
+ entry = &q->entry[index];
+ buf = entry->buf;
+ if (len)
+ *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
+ le32_to_cpu(READ_ONCE(desc->ctrl)));
+ if (buf)
+ dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
+ DMA_FROM_DEVICE);
+ entry->buf = NULL;
+
+ return buf;
+}
+
+static int
+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ bool rx)
+{
+ enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ int n_buf = 0;
+
+ spin_lock_bh(&q->lock);
+ while (q->queued < q->n_desc) {
+ struct mtk_wed_wo_queue_entry *entry;
+ dma_addr_t addr;
+ void *buf;
+
+ buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
+ if (!buf)
+ break;
+
+ addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+ skb_free_frag(buf);
+ break;
+ }
+
+ q->head = (q->head + 1) % q->n_desc;
+ entry = &q->entry[q->head];
+ entry->addr = addr;
+ entry->len = q->buf_size;
+ q->entry[q->head].buf = buf;
+
+ if (rx) {
+ struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+ u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+ FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+ entry->len);
+
+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+ }
+ q->queued++;
+ n_buf++;
+ }
+ spin_unlock_bh(&q->lock);
+
+ return n_buf;
+}
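
The queue bookkeeping above is a plain producer/consumer ring: head is the producer index, tail the consumer index, and queued the number of filled slots, so one refill pass can add at most n_desc - queued buffers, and only RX descriptors get buf0/ctrl programmed here because TX buffers are filled later by mtk_wed_wo_queue_tx_skb(). A trivial sketch of that invariant (hypothetical helper):

	/* Hypothetical helper: descriptors a refill pass may still fill,
	 * matching the "while (q->queued < q->n_desc)" loop above.
	 */
	static int example_wo_queue_room(const struct mtk_wed_wo_queue *q)
	{
		return q->n_desc - q->queued;
	}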
+
+static void
+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
+{
+ mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+ mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static void
+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ for (;;) {
+ struct mtk_wed_mcu_hdr *hdr;
+ struct sk_buff *skb;
+ void *data;
+ u32 len;
+
+ data = mtk_wed_wo_dequeue(wo, q, &len, false);
+ if (!data)
+ break;
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb) {
+ skb_free_frag(data);
+ continue;
+ }
+
+ __skb_put(skb, len);
+ if (mtk_wed_mcu_check_msg(wo, skb)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+ if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+ mtk_wed_mcu_rx_event(wo, skb);
+ else
+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+ }
+
+ if (mtk_wed_wo_queue_refill(wo, q, true)) {
+ u32 index = (q->head - 1) % q->n_desc;
+
+ mtk_wed_wo_queue_kick(wo, q, index);
+ }
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+ struct mtk_wed_wo *wo = data;
+
+ mtk_wed_wo_set_isr(wo, 0);
+ tasklet_schedule(&wo->mmio.irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+ u32 intr, mask;
+
+ /* disable interrupts */
+ mtk_wed_wo_set_isr(wo, 0);
+
+ intr = mtk_wed_wo_get_isr(wo);
+ intr &= wo->mmio.irq_mask;
+ mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+ mtk_wed_wo_irq_disable(wo, mask);
+
+ if (intr & MTK_WED_WO_RXCH_INT_MASK) {
+ mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
+ mtk_wed_wo_rx_complete(wo);
+ }
+}
+
+/* mtk wed wo hw queues */
+
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ int n_desc, int buf_size, int index,
+ struct mtk_wed_wo_queue_regs *regs)
+{
+ spin_lock_init(&q->lock);
+ q->regs = *regs;
+ q->n_desc = n_desc;
+ q->buf_size = buf_size;
+
+ q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+ &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+ dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+ q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ struct page *page;
+ int i;
+
+ spin_lock_bh(&q->lock);
+ for (i = 0; i < q->n_desc; i++) {
+ struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+ dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+ DMA_TO_DEVICE);
+ skb_free_frag(entry->buf);
+ entry->buf = NULL;
+ }
+ spin_unlock_bh(&q->lock);
+
+ if (!q->cache.va)
+ return;
+
+ page = virt_to_page(q->cache.va);
+ __page_frag_cache_drain(page, q->cache.pagecnt_bias);
+ memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ struct page *page;
+
+ spin_lock_bh(&q->lock);
+ for (;;) {
+ void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+ if (!buf)
+ break;
+
+ skb_free_frag(buf);
+ }
+ spin_unlock_bh(&q->lock);
+
+ if (!q->cache.va)
+ return;
+
+ page = virt_to_page(q->cache.va);
+ __page_frag_cache_drain(page, q->cache.pagecnt_bias);
+ memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+ mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+ mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ struct sk_buff *skb)
+{
+ struct mtk_wed_wo_queue_entry *entry;
+ struct mtk_wed_wo_queue_desc *desc;
+ int ret = 0, index;
+ u32 ctrl;
+
+ spin_lock_bh(&q->lock);
+
+ q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+ index = (q->head + 1) % q->n_desc;
+ if (q->tail == index) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ entry = &q->entry[index];
+ if (skb->len > entry->len) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ desc = &q->desc[index];
+ q->head = index;
+
+ dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+ DMA_TO_DEVICE);
+ memcpy(entry->buf, skb->data, skb->len);
+ dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+ DMA_TO_DEVICE);
+
+ ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+ MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+ WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+ mtk_wed_wo_queue_kick(wo, q, q->head);
+ mtk_wed_wo_kickout(wo);
+out:
+ spin_unlock_bh(&q->lock);
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
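
Note that the TX path is copy-based: the message is memcpy()'d into a buffer that the refill path pre-allocated and DMA-mapped, and dev_kfree_skb() runs on both success and failure, so the skb is always consumed. A minimal caller sketch under that assumption:

	/* Hedged caller sketch: the queue consumes the skb on both success
	 * and failure, so only the return code needs handling here.
	 */
	static int example_wo_tx(struct mtk_wed_wo *wo, struct sk_buff *skb)
	{
		int ret = mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);

		if (ret)
			pr_debug("wo tx failed: ring full or oversized message (%d)\n",
				 ret);

		return ret;	/* do not free the skb here; it is already gone */
	}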
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+ return 0;
+}
+
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+ struct mtk_wed_wo_queue_regs regs;
+ struct device_node *np;
+ int ret;
+
+ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+ if (!np)
+ return -ENODEV;
+
+ wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(wo->mmio.regs)) {
+ ret = PTR_ERR(wo->mmio.regs);
+ goto error_put;
+ }
+
+ wo->mmio.irq = irq_of_parse_and_map(np, 0);
+ wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+ spin_lock_init(&wo->mmio.lock);
+ tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+ ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+ mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+ KBUILD_MODNAME, wo);
+ if (ret)
+ goto error;
+
+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+ &regs);
+ if (ret)
+ goto error;
+
+ mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
+ mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+ &regs);
+ if (ret)
+ goto error;
+
+ mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
+ mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+
+ /* rx queue irqmask */
+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+ return 0;
+
+error:
+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+error_put:
+ of_node_put(np);
+ return ret;
+}
+
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+ /* disable interrupts */
+ mtk_wed_wo_set_isr(wo, 0);
+
+ tasklet_disable(&wo->mmio.irq_tasklet);
+
+ disable_irq(wo->mmio.irq);
+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+ mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+ mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+ mtk_wed_wo_queue_free(wo, &wo->q_tx);
+ mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+ struct mtk_wed_wo *wo;
+ int ret;
+
+ wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+ if (!wo)
+ return -ENOMEM;
+
+ hw->wed_wo = wo;
+ wo->hw = hw;
+
+ ret = mtk_wed_wo_hardware_init(wo);
+ if (ret)
+ return ret;
+
+ ret = mtk_wed_mcu_init(wo);
+ if (ret)
+ return ret;
+
+ return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+ struct mtk_wed_wo *wo = hw->wed_wo;
+
+ mtk_wed_wo_hw_deinit(wo);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
new file mode 100644
index 000000000000..c8fb85795864
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2022 Lorenzo Bianconi <lorenzo@kernel.org> */
+
+#ifndef __MTK_WED_WO_H
+#define __MTK_WED_WO_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+struct mtk_wed_hw;
+
+struct mtk_wed_mcu_hdr {
+ /* DW0 */
+ u8 version;
+ u8 cmd;
+ __le16 length;
+
+ /* DW1 */
+ __le16 seq;
+ __le16 flag;
+
+ /* DW2 */
+ __le32 status;
+
+ /* DW3 */
+ u8 rsv[20];
+};
+
+struct mtk_wed_wo_log_info {
+ __le32 sn;
+ __le32 total;
+ __le32 rro;
+ __le32 mod;
+};
+
+enum mtk_wed_wo_event {
+ MTK_WED_WO_EVT_LOG_DUMP = 0x1,
+ MTK_WED_WO_EVT_PROFILING = 0x2,
+ MTK_WED_WO_EVT_RXCNT_INFO = 0x3,
+};
+
+#define MTK_WED_MODULE_ID_WO 1
+#define MTK_FW_DL_TIMEOUT 4000000 /* us */
+#define MTK_WOCPU_TIMEOUT 2000000 /* us */
+
+enum {
+ MTK_WED_WARP_CMD_FLAG_RSP = BIT(0),
+ MTK_WED_WARP_CMD_FLAG_NEED_RSP = BIT(1),
+ MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2),
+};
+
+#define MTK_WED_WO_CPU_MCUSYS_RESET_ADDR 0x15194050
+#define MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK 0x20
+#define MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK 0x1
+
+enum {
+ MTK_WED_WO_REGION_EMI,
+ MTK_WED_WO_REGION_ILM,
+ MTK_WED_WO_REGION_DATA,
+ MTK_WED_WO_REGION_BOOT,
+ __MTK_WED_WO_REGION_MAX,
+};
+
+enum mtk_wed_wo_state {
+ MTK_WED_WO_STATE_UNDEFINED,
+ MTK_WED_WO_STATE_INIT,
+ MTK_WED_WO_STATE_ENABLE,
+ MTK_WED_WO_STATE_DISABLE,
+ MTK_WED_WO_STATE_HALT,
+ MTK_WED_WO_STATE_GATING,
+ MTK_WED_WO_STATE_SER_RESET,
+ MTK_WED_WO_STATE_WF_RESET,
+};
+
+enum mtk_wed_wo_done_state {
+ MTK_WED_WOIF_UNDEFINED,
+ MTK_WED_WOIF_DISABLE_DONE,
+ MTK_WED_WOIF_TRIGGER_ENABLE,
+ MTK_WED_WOIF_ENABLE_DONE,
+ MTK_WED_WOIF_TRIGGER_GATING,
+ MTK_WED_WOIF_GATING_DONE,
+ MTK_WED_WOIF_TRIGGER_HALT,
+ MTK_WED_WOIF_HALT_DONE,
+};
+
+enum mtk_wed_dummy_cr_idx {
+ MTK_WED_DUMMY_CR_FWDL,
+ MTK_WED_DUMMY_CR_WO_STATUS,
+};
+
+#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
+#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
+
+#define MTK_WO_MCU_CFG_LS_BASE 0
+#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
+#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x004)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x00c)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x010)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x014)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x018)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x01c)
+#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x050)
+#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x060)
+#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x064)
+
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5)
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0)
+
+#define MTK_WED_WO_RING_SIZE 256
+#define MTK_WED_WO_CMD_LEN 1504
+
+#define MTK_WED_WO_TXCH_NUM 0
+#define MTK_WED_WO_RXCH_NUM 1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
+
+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
+ MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY 0x004
+#define MTK_WED_WO_CCIF_START 0x008
+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
+#define MTK_WED_WO_CCIF_RCHNUM 0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK 0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
+#define MTK_WED_WO_CCIF_DUMMY1 0x020
+#define MTK_WED_WO_CCIF_DUMMY2 0x024
+#define MTK_WED_WO_CCIF_DUMMY3 0x028
+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
+#define MTK_WED_WO_CCIF_SHADOW1 0x030
+#define MTK_WED_WO_CCIF_SHADOW2 0x034
+#define MTK_WED_WO_CCIF_SHADOW3 0x038
+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
+#define MTK_WED_WO_CCIF_DUMMY5 0x050
+#define MTK_WED_WO_CCIF_DUMMY6 0x054
+#define MTK_WED_WO_CCIF_DUMMY7 0x058
+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
+#define MTK_WED_WO_CCIF_SHADOW5 0x060
+#define MTK_WED_WO_CCIF_SHADOW6 0x064
+#define MTK_WED_WO_CCIF_SHADOW7 0x068
+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14)
+#define MTK_WED_WO_CTL_BURST BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT 16
+#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE BIT(31)
+#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0)
+
+struct mtk_wed_wo_memory_region {
+ const char *name;
+ void __iomem *addr;
+ phys_addr_t phy_addr;
+ u32 size;
+ bool shared:1;
+ bool consumed:1;
+};
+
+struct mtk_wed_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 rsv0[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 rsv1[15];
+} __packed;
+
+struct mtk_wed_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 num_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 rsv[2];
+ char fw_ver[10];
+ char build_date[15];
+ u32 crc;
+};
+
+struct mtk_wed_wo_queue_regs {
+ u32 desc_base;
+ u32 ring_size;
+ u32 cpu_idx;
+ u32 dma_idx;
+};
+
+struct mtk_wed_wo_queue_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+ __le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+ dma_addr_t addr;
+ void *buf;
+ u32 len;
+};
+
+struct mtk_wed_wo_queue {
+ struct mtk_wed_wo_queue_regs regs;
+
+ struct page_frag_cache cache;
+ spinlock_t lock;
+
+ struct mtk_wed_wo_queue_desc *desc;
+ dma_addr_t desc_dma;
+
+ struct mtk_wed_wo_queue_entry *entry;
+
+ u16 head;
+ u16 tail;
+ int n_desc;
+ int queued;
+ int buf_size;
+
+};
+
+struct mtk_wed_wo {
+ struct mtk_wed_hw *hw;
+ struct mtk_wed_wo_memory_region boot;
+
+ struct mtk_wed_wo_queue q_tx;
+ struct mtk_wed_wo_queue q_rx;
+
+ struct {
+ struct mutex mutex;
+ int timeout;
+ u16 seq;
+
+ struct sk_buff_head res_q;
+ wait_queue_head_t wait;
+ } mcu;
+
+ struct {
+ struct regmap *regs;
+
+ spinlock_t lock;
+ struct tasklet_struct irq_tasklet;
+ int irq;
+ u32 irq_mask;
+ } mmio;
+};
+
+static inline int
+mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+ if (hdr->version)
+ return -EINVAL;
+
+ if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length))
+ return -EINVAL;
+
+ return 0;
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+ struct sk_buff *skb);
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+ const void *data, int len, bool wait_resp);
+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
+ int len);
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+ struct sk_buff *skb);
+
+#endif /* __MTK_WED_WO_H */
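
mtk_wed_mcu_check_msg() accepts a message only when the header version is zero and hdr->length covers the whole skb, header included. A sketch of a header that satisfies that check; reusing MTK_WED_WO_CMD_RXCNT_INFO as the command id here is illustrative only:

	/* Hedged sketch: smallest header mtk_wed_mcu_check_msg() will accept,
	 * i.e. version 0 and length equal to the full message size. The skb
	 * carrying it must be exactly le16_to_cpu(hdr.length) bytes long.
	 */
	struct mtk_wed_mcu_hdr hdr = {
		.cmd	= MTK_WED_WO_CMD_RXCNT_INFO,
		.length	= cpu_to_le16(sizeof(hdr)),
		.flag	= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP),
	};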
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 024788549c25..98b5ffb4d729 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -111,34 +111,27 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
}
/**
- * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
+ * mlx4_en_phc_adjfine - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
- * @delta: Desired frequency change in parts per billion
+ * @scaled_ppm: Desired frequency change in scaled parts per million
*
- * Adjust the frequency of the PHC cycle counter by the indicated delta from
- * the base frequency.
+ * Adjust the frequency of the PHC cycle counter by the indicated scaled_ppm
+ * from the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
**/
-static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int mlx4_en_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- u64 adj;
- u32 diff, mult;
- int neg_adj = 0;
+ u32 mult;
unsigned long flags;
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
- if (delta < 0) {
- neg_adj = 1;
- delta = -delta;
- }
- mult = mdev->nominal_c_mult;
- adj = mult;
- adj *= delta;
- diff = div_u64(adj, 1000000000ULL);
+ mult = (u32)adjust_by_scaled_ppm(mdev->nominal_c_mult, scaled_ppm);
write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_read(&mdev->clock);
- mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
+ mdev->cycles.mult = mult;
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
@@ -237,7 +230,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = mlx4_en_phc_adjfreq,
+ .adjfine = mlx4_en_phc_adjfine,
.adjtime = mlx4_en_phc_adjtime,
.gettime64 = mlx4_en_phc_gettime,
.settime64 = mlx4_en_phc_settime,
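
Because scaled_ppm carries a 16-bit binary fractional part, the multiplier delta applied by adjust_by_scaled_ppm() amounts to mult * |scaled_ppm| / (10^6 * 2^16), added or subtracted according to the sign of scaled_ppm. A hedged illustration of that arithmetic for a non-negative adjustment (not the helper's actual implementation):

	/* Illustration only; the driver simply calls adjust_by_scaled_ppm(). */
	u64 delta = div64_u64((u64)mdev->nominal_c_mult * scaled_ppm,
			      1000000ULL << 16);
	u32 mult = mdev->nominal_c_mult + (u32)delta;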
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index ca4b93a01034..8800d3f1f55c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2337,11 +2337,8 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
/* Unregister device - this will close the port if it was up */
- if (priv->registered) {
- devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
- priv->port));
+ if (priv->registered)
unregister_netdev(dev);
- }
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
@@ -3474,6 +3471,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
mdev->profile.prof[priv->port].tx_ppp,
mdev->profile.prof[priv->port].tx_pause);
+ SET_NETDEV_DEVLINK_PORT(dev,
+ mlx4_get_devlink_port(mdev->dev, priv->port));
err = register_netdev(dev);
if (err) {
en_err(priv, "Netdev registration failed for port %d\n", port);
@@ -3481,8 +3480,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
priv->registered = 1;
- devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
- dev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 43a4102e9c09..c5758637b7be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -65,7 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
ring->sp_stride = stride;
- ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+ ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
@@ -77,9 +77,11 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
- ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
+ ring->bounce_buf = kmalloc_node(MLX4_TX_BOUNCE_BUFFER_SIZE,
+ GFP_KERNEL, node);
if (!ring->bounce_buf) {
- ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ ring->bounce_buf = kmalloc(MLX4_TX_BOUNCE_BUFFER_SIZE,
+ GFP_KERNEL);
if (!ring->bounce_buf) {
err = -ENOMEM;
goto err_info;
@@ -909,11 +911,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
nr_txbb = desc_size >> LOG_TXBB_SIZE;
- if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "Oversized header or SG list\n");
- goto tx_drop_count;
- }
bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
@@ -941,6 +938,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(index + nr_txbb <= ring->size))
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
else {
+ if (unlikely(nr_txbb > MLX4_MAX_DESC_TXBBS)) {
+ if (netif_msg_tx_err(priv))
+ en_warn(priv, "Oversized header or SG list\n");
+ goto tx_drop_count;
+ }
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
bf_ok = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d3fc86cd3c1d..3ae246391549 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3043,7 +3043,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
*/
if (!IS_ENABLED(CONFIG_MLX4_EN) &&
dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- devlink_port_type_eth_set(&info->devlink_port, NULL);
+ devlink_port_type_eth_set(&info->devlink_port);
else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) &&
dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
devlink_port_type_ib_set(&info->devlink_port, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index e132ff4c82f2..3d4226ddba5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -89,9 +89,19 @@
#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
-/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
-#define MAX_DESC_SIZE 512
-#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
+#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
+#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
+
+/* Maximal size of the bounce buffer:
+ * 256 bytes for LSO headers.
+ * CTRL_SIZE for control desc.
+ * DS_SIZE if skb->head contains some payload.
+ * MAX_SKB_FRAGS frags.
+ */
+#define MLX4_TX_BOUNCE_BUFFER_SIZE \
+ ALIGN(256 + CTRL_SIZE + DS_SIZE + MAX_SKB_FRAGS * DS_SIZE, TXBB_SIZE)
+
+#define MLX4_MAX_DESC_TXBBS (MLX4_TX_BOUNCE_BUFFER_SIZE / TXBB_SIZE)
/*
* OS related constants and tunables
@@ -217,9 +227,7 @@ struct mlx4_en_tx_info {
#define MLX4_EN_BIT_DESC_OWN 0x80000000
-#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD 0x100
-#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
struct mlx4_en_tx_desc {
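
With the usual mlx4 values (64-byte TXBB_SIZE, 16-byte control and data segments, and the default MAX_SKB_FRAGS of 17 on 4K pages, all stated here as assumptions), the new macro works out to:

	256 + CTRL_SIZE + DS_SIZE + 17 * DS_SIZE = 256 + 16 + 16 + 272 = 560 bytes
	MLX4_TX_BOUNCE_BUFFER_SIZE = ALIGN(560, TXBB_SIZE) = 576 bytes
	MLX4_MAX_DESC_TXBBS        = 576 / 64              = 9 TXBBs

compared with the previous fixed MAX_DESC_SIZE of 512 bytes (8 TXBBs); accordingly, the oversize drop in en_tx.c above is deferred until a descriptor actually needs the bounce buffer.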
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e7a894ba5c3e..d3ca745d107d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
-#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 66c6a7017695..ddb197970c22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -46,10 +46,6 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
-
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -318,6 +314,10 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
.rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
+ .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
+ .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
+ .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
@@ -840,7 +840,7 @@ static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
};
-static int mlx5_devlink_traps_register(struct devlink *devlink)
+int mlx5_devlink_traps_register(struct devlink *devlink)
{
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
@@ -862,7 +862,7 @@ err_trap_group:
return err;
}
-static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
@@ -889,17 +889,11 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto max_uc_list_err;
- err = mlx5_devlink_traps_register(devlink);
- if (err)
- goto traps_reg_err;
-
if (!mlx5_core_is_mp_slave(dev))
devlink_set_features(devlink, DEVLINK_F_RELOAD);
return 0;
-traps_reg_err:
- mlx5_devlink_max_uc_list_param_unregister(devlink);
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
@@ -910,7 +904,6 @@ auxdev_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
- mlx5_devlink_traps_unregister(devlink);
mlx5_devlink_max_uc_list_param_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 30bf4882779b..fd033df24856 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -30,6 +30,8 @@ void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_
int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
enum devlink_trap_action *action);
+int mlx5_devlink_traps_register(struct devlink *devlink);
+void mlx5_devlink_traps_unregister(struct devlink *devlink);
struct devlink *mlx5_devlink_alloc(struct device *dev);
void mlx5_devlink_free(struct devlink *devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 26a23047f1f3..65790ff58a74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -103,11 +103,11 @@ struct page_pool;
* size actually used at runtime, but it's not a problem when calculating static
* array sizes.
*/
-#define MLX5_UMR_MAX_MTT_SPACE \
+#define MLX5_UMR_MAX_FLEX_SPACE \
(ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
- MLX5_UMR_MTT_ALIGNMENT))
+ MLX5_UMR_FLEX_ALIGNMENT))
#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
- rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
+ rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt))
#define MLX5E_MAX_RQ_NUM_MTTS \
(ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
@@ -160,7 +160,7 @@ struct page_pool;
(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
- ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
@@ -344,6 +344,7 @@ enum {
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
+ MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */
};
struct mlx5e_cq {
@@ -370,6 +371,7 @@ struct mlx5e_cq_decomp {
u8 mini_arr_idx;
u16 left;
u16 wqe_counter;
+ bool last_cqe_title;
} ____cacheline_aligned_in_smp;
enum mlx5e_dma_map_type {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index b69f9d10ccbd..83adaabf59f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -51,13 +51,6 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
return ret;
}
-void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
-{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
-
- devlink_port_type_eth_set(dl_port, priv->netdev);
-}
-
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
@@ -69,13 +62,3 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
devl_unlock(devlink);
}
-
-struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!netif_device_present(dev))
- return NULL;
-
- return mlx5e_devlink_get_dl_port(priv);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
index 10b50feb9883..4f238d4fff55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -9,8 +9,6 @@
int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
-void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
-struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
static inline struct devlink_port *
mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 29dd3a04c154..585bdc8383ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -107,7 +107,7 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
- MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
+ MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
@@ -146,7 +146,7 @@ u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
u16 umr_wqe_sz;
umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
- ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+ ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
@@ -607,14 +607,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_mtu_frames = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
- mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
- BIT(params->log_rq_mtu_frames),
- BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -852,6 +844,10 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_compression_layout,
+ MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
+ MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
+ MLX5_CQE_COMPRESS_LAYOUT_BASIC);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 034debd140bc..c9be6eb88012 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -154,4 +154,18 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d %s)\n",
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
+ BIT(params->log_rq_mtu_frames),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS),
+ MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
+ "enhanced" : "basic");
+};
+
#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index fac7e3ff2674..b08339d986d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -690,7 +690,6 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
if (err) {
- WARN_ON_ONCE(true);
netdev_dbg(priv->netdev,
"Couldn't find tunnel for tun_id: %d, err: %d\n",
tun_id, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
index 21aab96357b5..a278f52d52b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -28,4 +28,5 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_accept = {
.can_offload = tc_act_can_offload_accept,
.parse_action = tc_act_parse_accept,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 3337241cfd84..eba0c8698926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -11,7 +11,7 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
[FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
- [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_redirect,
[FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
[FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
[FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index e1570ff056ae..8346557eeaf6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -32,6 +32,11 @@ struct mlx5e_tc_act_parse_state {
struct mlx5_tc_ct_priv *ct_priv;
};
+struct mlx5e_tc_act_branch_ctrl {
+ enum flow_action_id act_id;
+ u32 extval;
+};
+
struct mlx5e_tc_act {
bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -60,6 +65,12 @@ struct mlx5e_tc_act {
int (*stats_action)(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act);
+
+ bool (*get_branch_ctrl)(const struct flow_action_entry *act,
+ struct mlx5e_tc_act_branch_ctrl *cond_true,
+ struct mlx5e_tc_act_branch_ctrl *cond_false);
+
+ bool is_terminating_action;
};
struct mlx5e_tc_flow_action {
@@ -81,6 +92,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred;
+extern struct mlx5e_tc_act mlx5e_tc_act_redirect;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic;
extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
index dd025a95c439..7d16aeabb119 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -27,4 +27,5 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_drop = {
.can_offload = tc_act_can_offload_drop,
.parse_action = tc_act_parse_drop,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 25174f68613e..0923e6db2d0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -121,4 +121,5 @@ struct mlx5e_tc_act mlx5e_tc_act_goto = {
.can_offload = tc_act_can_offload_goto,
.parse_action = tc_act_parse_goto,
.post_parse = tc_act_post_parse_goto,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 4ac7de3f6afa..78c427b38048 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -334,4 +334,11 @@ tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_mirred = {
.can_offload = tc_act_can_offload_mirred,
.parse_action = tc_act_parse_mirred,
+ .is_terminating_action = false,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_redirect = {
+ .can_offload = tc_act_can_offload_mirred,
+ .parse_action = tc_act_parse_mirred,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
index 90b4c1b34776..7f409692b18f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -48,4 +48,5 @@ tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = {
.can_offload = tc_act_can_offload_mirred_nic,
.parse_action = tc_act_parse_mirred_nic,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index c8e5ca65bb6e..898fe16a4384 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -4,20 +4,54 @@
#include "act.h"
#include "en/tc_priv.h"
+static bool police_act_validate_control(enum flow_action_id act_id,
+ struct netlink_ext_ack *extack)
+{
+ if (act_id != FLOW_ACTION_PIPE &&
+ act_id != FLOW_ACTION_ACCEPT &&
+ act_id != FLOW_ACTION_JUMP &&
+ act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform-exceed action is not pipe, ok, jump or drop");
+ return false;
+ }
+
+ return true;
+}
+
+static int police_act_validate(const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (!police_act_validate_control(act->police.exceed.act_id, extack) ||
+ !police_act_validate_control(act->police.notexceed.act_id, extack))
+ return -EOPNOTSUPP;
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static bool
tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
- act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
- NL_SET_ERR_MSG_MOD(parse_state->extack,
- "Offload not supported when conform action is not pipe or ok");
- return false;
- }
- if (mlx5e_policer_validate(parse_state->flow_action, act,
- parse_state->extack))
+ int err;
+
+ err = police_act_validate(act, parse_state->extack);
+ if (err)
return false;
return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
@@ -79,7 +113,7 @@ tc_act_police_offload(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
int err = 0;
- err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ err = police_act_validate(act, fl_act->extack);
if (err)
return err;
@@ -147,6 +181,19 @@ tc_act_police_stats(struct mlx5e_priv *priv,
return 0;
}
+static bool
+tc_act_police_get_branch_ctrl(const struct flow_action_entry *act,
+ struct mlx5e_tc_act_branch_ctrl *cond_true,
+ struct mlx5e_tc_act_branch_ctrl *cond_false)
+{
+ cond_true->act_id = act->police.notexceed.act_id;
+ cond_true->extval = act->police.notexceed.extval;
+
+ cond_false->act_id = act->police.exceed.act_id;
+ cond_false->extval = act->police.exceed.extval;
+ return true;
+}
+
struct mlx5e_tc_act mlx5e_tc_act_police = {
.can_offload = tc_act_can_offload_police,
.parse_action = tc_act_parse_police,
@@ -154,4 +201,5 @@ struct mlx5e_tc_act mlx5e_tc_act_police = {
.offload_action = tc_act_police_offload,
.destroy_action = tc_act_police_destroy,
.stats_action = tc_act_police_stats,
+ .get_branch_ctrl = tc_act_police_get_branch_ctrl,
};
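The only per-action hook the new branching support needs is get_branch_ctrl(): it reports which flow_action control each branch takes and, for FLOW_ACTION_JUMP, the jump count in extval. A minimal sketch of the contract for a hypothetical action (names invented for illustration, mirroring the police implementation above):

static bool
tc_act_foo_get_branch_ctrl(const struct flow_action_entry *act,
			   struct mlx5e_tc_act_branch_ctrl *cond_true,
			   struct mlx5e_tc_act_branch_ctrl *cond_false)
{
	/* hypothetical example: the taken branch continues, the other drops */
	cond_true->act_id = FLOW_ACTION_PIPE;
	cond_true->extval = 0;
	cond_false->act_id = FLOW_ACTION_DROP;
	cond_false->extval = 0;
	return true;
}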
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index 53b270f652b9..915ce201aeb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -3,6 +3,7 @@
#include "act.h"
#include "en/tc_priv.h"
+#include "eswitch.h"
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
@@ -10,13 +11,6 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
int act_index,
struct mlx5_flow_attr *attr)
{
- struct netlink_ext_ack *extack = parse_state->extack;
-
- if (parse_state->flow_action->num_entries != 1) {
- NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
- return false;
- }
-
return true;
}
@@ -27,7 +21,7 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
+ attr->dest_ft = mlx5_eswitch_get_slow_fdb(priv->mdev->priv.eswitch);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index be74e1403328..4e5f4aa44724 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -257,16 +257,16 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
counter = mlx5_fc_create(mdev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
- goto err_red_counter;
+ goto err_drop_counter;
}
- meter->red_counter = counter;
+ meter->drop_counter = counter;
counter = mlx5_fc_create(mdev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
- goto err_green_counter;
+ goto err_act_counter;
}
- meter->green_counter = counter;
+ meter->act_counter = counter;
meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
struct mlx5e_flow_meter_aso_obj,
@@ -313,10 +313,10 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
err_mem:
mlx5e_flow_meter_destroy_aso_obj(mdev, id);
err_create:
- mlx5_fc_destroy(mdev, meter->green_counter);
-err_green_counter:
- mlx5_fc_destroy(mdev, meter->red_counter);
-err_red_counter:
+ mlx5_fc_destroy(mdev, meter->act_counter);
+err_act_counter:
+ mlx5_fc_destroy(mdev, meter->drop_counter);
+err_drop_counter:
kfree(meter);
return ERR_PTR(err);
}
@@ -329,8 +329,8 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
struct mlx5e_flow_meter_aso_obj *meters_obj;
int n, pos;
- mlx5_fc_destroy(mdev, meter->green_counter);
- mlx5_fc_destroy(mdev, meter->red_counter);
+ mlx5_fc_destroy(mdev, meter->act_counter);
+ mlx5_fc_destroy(mdev, meter->drop_counter);
meters_obj = meter->meters_obj;
pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
@@ -575,8 +575,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 bytes1, packets1, lastuse1;
u64 bytes2, packets2, lastuse2;
- mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
- mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
+ mlx5_fc_query_cached(meter->act_counter, &bytes1, &packets1, &lastuse1);
+ mlx5_fc_query_cached(meter->drop_counter, &bytes2, &packets2, &lastuse2);
*bytes = bytes1 + bytes2;
*packets = packets1 + packets2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 6de6e8a16327..f16abf33bb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -32,8 +32,8 @@ struct mlx5e_flow_meter_handle {
struct hlist_node hlist;
struct mlx5e_flow_meter_params params;
- struct mlx5_fc *green_counter;
- struct mlx5_fc *red_counter;
+ struct mlx5_fc *act_counter;
+ struct mlx5_fc *drop_counter;
};
struct mlx5e_meter_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
index 8b77e822810e..c38211097746 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -11,8 +11,10 @@
struct mlx5e_post_meter_priv {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
- struct mlx5_flow_handle *fwd_green_rule;
- struct mlx5_flow_handle *drop_red_rule;
+ struct mlx5_flow_handle *green_rule;
+ struct mlx5_flow_attr *green_attr;
+ struct mlx5_flow_handle *red_rule;
+ struct mlx5_flow_attr *red_attr;
};
struct mlx5_flow_table *
@@ -81,15 +83,48 @@ mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
return err;
}
+static struct mlx5_flow_handle *
+mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_flow_handle *ret;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DROP)
+ attr->counter = drop_counter;
+ else
+ attr->counter = act_counter;
+
+ attr->ft = post_meter->ft;
+ attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
+ attr->outer_match_level = MLX5_MATCH_NONE;
+ attr->chain = 0;
+ attr->prio = 0;
+
+ ret = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ /* We did not create the counter, so we can't delete it.
+ * Avoid freeing the counter when the attr is deleted in free_branch_attr().
+ */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return ret;
+}
+
static int
mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
struct mlx5e_post_meter_priv *post_meter,
struct mlx5e_post_act *post_act,
- struct mlx5_fc *green_counter,
- struct mlx5_fc *red_counter)
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5_flow_attr *green_attr,
+ struct mlx5_flow_attr *red_attr)
{
- struct mlx5_flow_destination dest[2] = {};
- struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err;
@@ -100,52 +135,45 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[0].counter_id = mlx5_fc_id(red_counter);
- rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, spec, red_attr,
+ act_counter, drop_counter);
if (IS_ERR(rule)) {
- mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter exceed rule\n");
err = PTR_ERR(rule);
goto err_red;
}
- post_meter->drop_red_rule = rule;
+ post_meter->red_rule = rule;
+ post_meter->red_attr = red_attr;
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(green_counter);
-
- rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, spec, green_attr,
+ act_counter, drop_counter);
if (IS_ERR(rule)) {
- mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter notexceed rule\n");
err = PTR_ERR(rule);
goto err_green;
}
- post_meter->fwd_green_rule = rule;
+ post_meter->green_rule = rule;
+ post_meter->green_attr = green_attr;
kvfree(spec);
return 0;
err_green:
- mlx5_del_flow_rules(post_meter->drop_red_rule);
+ mlx5_del_flow_rules(post_meter->red_rule);
err_red:
kvfree(spec);
return err;
}
static void
-mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rules_destroy(struct mlx5_eswitch *esw,
+ struct mlx5e_post_meter_priv *post_meter)
{
- mlx5_del_flow_rules(post_meter->drop_red_rule);
- mlx5_del_flow_rules(post_meter->fwd_green_rule);
+ mlx5_eswitch_del_offloaded_rule(esw, post_meter->red_rule, post_meter->red_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, post_meter->green_rule, post_meter->green_attr);
}
static void
@@ -164,8 +192,10 @@ struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act,
- struct mlx5_fc *green_counter,
- struct mlx5_fc *red_counter)
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5_flow_attr *branch_true,
+ struct mlx5_flow_attr *branch_false)
{
struct mlx5e_post_meter_priv *post_meter;
int err;
@@ -182,8 +212,8 @@ mlx5e_post_meter_init(struct mlx5e_priv *priv,
if (err)
goto err_fg;
- err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
- red_counter);
+ err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, act_counter,
+ drop_counter, branch_true, branch_false);
if (err)
goto err_rules;
@@ -199,9 +229,9 @@ err_ft:
}
void
-mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter)
{
- mlx5e_post_meter_rules_destroy(post_meter);
+ mlx5e_post_meter_rules_destroy(esw, post_meter);
mlx5e_post_meter_fg_destroy(post_meter);
mlx5e_post_meter_table_destroy(post_meter);
kfree(post_meter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
index 34d0e4b9fc7a..a4075d33fde2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -21,9 +21,11 @@ struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act,
- struct mlx5_fc *green_counter,
- struct mlx5_fc *red_counter);
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5_flow_attr *branch_true,
+ struct mlx5_flow_attr *branch_false);
void
-mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
+mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter);
#endif /* __MLX5_EN_POST_METER_H__ */
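With the widened prototype, callers pass both meter counters and the two branch attributes when instantiating the post-meter tables. A minimal sketch of the updated call shape, matching the mlx5e_tc_add_flow_meter() site later in this patch (error handling shortened for illustration):

	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
					   meter->act_counter, meter->drop_counter,
					   attr->branch_true, attr->branch_false);
	if (IS_ERR(post_meter))
		return PTR_ERR(post_meter);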
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 864ce0c393e6..a69849e0deed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1774,35 +1774,42 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
- * +---------------------+
- * + ft prio (tc chain) +
- * + original match +
- * +---------------------+
- * | set chain miss mapping
- * | set fte_id
- * | set tunnel_id
- * | do decap
- * v
- * +---------------------+
- * + pre_ct/pre_ct_nat + if matches +-------------------------+
- * + zone+nat match +---------------->+ post_act (see below) +
- * +---------------------+ set zone +-------------------------+
- * | set zone
- * v
- * +--------------------+
- * + CT (nat or no nat) +
- * + tuple + zone match +
- * +--------------------+
- * | set mark
- * | set labels_id
- * | set established
- * | set zone_restore
- * | do nat (if needed)
- * v
- * +--------------+
- * + post_act + original filter actions
- * + fte_id match +------------------------>
- * +--------------+
+ * +---------------------+
+ * + ft prio (tc chain) +
+ * + original match +
+ * +---------------------+
+ * | set chain miss mapping
+ * | set fte_id
+ * | set tunnel_id
+ * | do decap
+ * |
+ * +-------------+
+ * | Chain 0 |
+ * | optimization|
+ * | v
+ * | +---------------------+
+ * | + pre_ct/pre_ct_nat + if matches +----------------------+
+ * | + zone+nat match +---------------->+ post_act (see below) +
+ * | +---------------------+ set zone +----------------------+
+ * | |
+ * +-------------+ set zone
+ * |
+ * v
+ * +--------------------+
+ * + CT (nat or no nat) +
+ * + tuple + zone match +
+ * +--------------------+
+ * | set mark
+ * | set labels_id
+ * | set established
+ * | set zone_restore
+ * | do nat (if needed)
+ * v
+ * +--------------+
+ * + post_act + original filter actions
+ * + fte_id match +------------------------>
+ * +--------------+
+ *
*/
static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
@@ -1818,6 +1825,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_flow *ct_flow;
int chain_mapping = 0, err;
struct mlx5_ct_ft *ft;
+ u16 zone;
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
if (!ct_flow) {
@@ -1884,6 +1892,25 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
}
+ /* Change the original rule to point to the ct table:
+ * chain 0 sets the zone and jumps to the ct table,
+ * other chains jump to the pre_ct table to align with act_ct cached logic.
+ */
+ pre_ct_attr->dest_chain = 0;
+ if (!attr->chain) {
+ zone = ft->zone & MLX5_CT_ZONE_MASK;
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
+ ZONE_TO_REG, zone);
+ if (err) {
+ ct_dbg("Failed to set zone register mapping");
+ goto err_mapping;
+ }
+
+ pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ } else {
+ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
+ }
+
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
pre_mod_acts->num_actions,
pre_mod_acts->actions);
@@ -1893,10 +1920,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
goto err_mapping;
}
pre_ct_attr->modify_hdr = mod_hdr;
-
- /* Change original rule point to ct table */
- pre_ct_attr->dest_chain = 0;
- pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
pre_ct_attr);
if (IS_ERR(ct_flow->pre_ct_rule)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 2e42d7c5451e..2b7fd1c0e643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -211,8 +211,4 @@ struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
-int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack);
-
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index a715601865d3..1b03ab03fc5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -345,29 +345,27 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
kfree(sa_entry);
}
-int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec;
- int ret;
+ int ret = -ENOMEM;
if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
- return 0;
+ return;
}
ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec)
- return -ENOMEM;
+ return;
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
ipsec->mdev = priv->mdev;
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
- if (!ipsec->wq) {
- ret = -ENOMEM;
+ if (!ipsec->wq)
goto err_wq;
- }
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
@@ -375,13 +373,14 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
priv->ipsec = ipsec;
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
- return 0;
+ return;
err_fs_init:
destroy_workqueue(ipsec->wq);
err_wq:
kfree(ipsec);
- return (ret != -EOPNOTSUPP) ? ret : 0;
+ mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
+ return;
}
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 16bcceec16c4..4c47347d0ee2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -146,7 +146,7 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_modify_state_work modify_work;
};
-int mlx5e_ipsec_init(struct mlx5e_priv *priv);
+void mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -174,9 +174,8 @@ mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
return sa_entry->ipsec->mdev;
}
#else
-static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
- return 0;
}
static inline void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 2e0335246967..78072bf93f3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -125,10 +125,8 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
/* struct for callback API management */
struct mlx5e_async_ctx {
struct mlx5_async_work context;
- struct mlx5_async_ctx async_ctx;
- struct work_struct work;
+ struct mlx5_async_ctx *async_ctx;
struct mlx5e_ktls_offload_context_tx *priv_tx;
- struct completion complete;
int err;
union {
u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
@@ -136,34 +134,33 @@ struct mlx5e_async_ctx {
};
};
-static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+struct mlx5e_bulk_async_ctx {
+ struct mlx5_async_ctx async_ctx;
+ DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
+};
+
+static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
+ int sz;
int i;
- bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ sz = struct_size(bulk_async, arr, n);
+ bulk_async = kvzalloc(sz, GFP_KERNEL);
if (!bulk_async)
return NULL;
- for (i = 0; i < n; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
+ mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);
- mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
- init_completion(&async->complete);
- }
+ for (i = 0; i < n; i++)
+ bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;
return bulk_async;
}
-static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
{
- int i;
-
- for (i = 0; i < n; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
-
- mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
- }
+ mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
kvfree(bulk_async);
}
@@ -176,12 +173,10 @@ static void create_tis_callback(int status, struct mlx5_async_work *context)
if (status) {
async->err = status;
priv_tx->create_err = 1;
- goto out;
+ return;
}
priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
-out:
- complete(&async->complete);
}
static void destroy_tis_callback(int status, struct mlx5_async_work *context)
@@ -190,7 +185,6 @@ static void destroy_tis_callback(int status, struct mlx5_async_work *context)
container_of(context, struct mlx5e_async_ctx, context);
struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
- complete(&async->complete);
kfree(priv_tx);
}
@@ -214,7 +208,7 @@ mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw
goto err_out;
} else {
async->priv_tx = priv_tx;
- err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
async->out_create, sizeof(async->out_create),
create_tis_callback, &async->context);
if (err)
@@ -232,13 +226,12 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
struct mlx5e_async_ctx *async)
{
if (priv_tx->create_err) {
- complete(&async->complete);
kfree(priv_tx);
return;
}
async->priv_tx = priv_tx;
mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
- &async->async_ctx,
+ async->async_ctx,
async->out_destroy, sizeof(async->out_destroy),
destroy_tis_callback, &async->context);
}
@@ -247,7 +240,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
struct mlx5e_ktls_offload_context_tx *obj, *n;
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
int i;
bulk_async = mlx5e_bulk_async_init(mdev, size);
@@ -256,16 +249,11 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
i = 0;
list_for_each_entry_safe(obj, n, list, list_node) {
- mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
i++;
}
- for (i = 0; i < size; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
-
- wait_for_completion(&async->complete);
- }
- mlx5e_bulk_async_cleanup(bulk_async, size);
+ mlx5e_bulk_async_cleanup(bulk_async);
}
/* Recycling pool API */
@@ -291,7 +279,7 @@ static void create_work(struct work_struct *work)
struct mlx5e_tls_tx_pool *pool =
container_of(work, struct mlx5e_tls_tx_pool, create_work);
struct mlx5e_ktls_offload_context_tx *obj;
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
LIST_HEAD(local_list);
int i, j, err = 0;
@@ -300,7 +288,7 @@ static void create_work(struct work_struct *work)
return;
for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
- obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
@@ -309,14 +297,13 @@ static void create_work(struct work_struct *work)
}
for (j = 0; j < i; j++) {
- struct mlx5e_async_ctx *async = &bulk_async[j];
+ struct mlx5e_async_ctx *async = &bulk_async->arr[j];
- wait_for_completion(&async->complete);
if (!err && async->err)
err = async->err;
}
atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
- mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ mlx5e_bulk_async_cleanup(bulk_async);
if (err)
goto err_out;
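The per-entry completions above are gone because the bulk context now shares a single mlx5_async_ctx; mlx5_cmd_cleanup_async_ctx() waits for all outstanding callbacks before the memory is freed. A condensed sketch of the allocation pattern, assuming n entries:

	struct mlx5e_bulk_async_ctx *bulk_async;
	int i;

	bulk_async = kvzalloc(struct_size(bulk_async, arr, n), GFP_KERNEL);
	if (!bulk_async)
		return NULL;
	mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);
	for (i = 0; i < n; i++)
		bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;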
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index f900709639f6..9369a580743e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -186,7 +186,7 @@ static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macs
return err;
}
- dma_device = &mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(mdev);
dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
err = dma_mapping_error(dma_device, dma_addr);
if (err) {
@@ -1299,12 +1299,12 @@ static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
struct mlx5_aso_ctrl_param *param)
{
+ struct mlx5e_macsec_umr *umr = macsec_aso->umr;
+
memset(aso_ctrl, 0, sizeof(*aso_ctrl));
- if (macsec_aso->umr->dma_addr) {
- aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
- aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
- aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
- }
+ aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
+ aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
+ aso_ctrl->l_key = cpu_to_be32(umr->mkey);
if (!param)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 0ae1865086ff..bed0c2d043e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -57,7 +57,6 @@ struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES];
/* Protect aRFS rules list */
spinlock_t arfs_lock;
- struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
};
@@ -376,7 +375,6 @@ int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
return -ENOMEM;
spin_lock_init(&arfs->arfs_lock);
- INIT_LIST_HEAD(&arfs->rules);
arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!arfs->wq)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 1728e197558d..7708acc9b2ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2445,4 +2445,5 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_eth_mac_stats = mlx5e_get_eth_mac_stats,
.get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
.get_rmon_stats = mlx5e_get_rmon_stats,
+ .get_link_ext_stats = mlx5e_get_link_ext_stats,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5e41dfdf79c8..8d36e2de53a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -208,7 +208,7 @@ static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
u32 sz;
- sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+ sz = ALIGN(entries * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
return sz / MLX5_OCTWORD;
}
@@ -1206,6 +1206,13 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
+ /* For enhanced CQE compression packet processing: decompress the
+ * session according to the enhanced layout.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) &&
+ MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);
+
return 0;
err_destroy_rq:
@@ -1896,6 +1903,7 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
cqe->op_own = 0xf1;
+ cqe->validity_iteration_count = 0xff;
}
cq->mdev = mdev;
@@ -3062,7 +3070,10 @@ int mlx5e_open_locked(struct net_device *netdev)
if (err)
goto err_clear_state_opened_flag;
- priv->profile->update_rx(priv);
+ err = priv->profile->update_rx(priv);
+ if (err)
+ goto err_close_channels;
+
mlx5e_selq_apply(&priv->selq);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
@@ -3072,6 +3083,8 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_queue_update_stats(priv);
return 0;
+err_close_channels:
+ mlx5e_close_channels(&priv->channels);
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_selq_cancel(&priv->selq);
@@ -4898,7 +4911,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
};
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -5226,10 +5238,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
- err = mlx5e_ipsec_init(priv);
- if (err)
- mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
-
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5242,7 +5250,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
- mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
}
@@ -5371,6 +5378,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
+ mlx5e_ipsec_init(priv);
err = mlx5e_macsec_init(priv);
if (err)
@@ -5434,6 +5442,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_macsec_cleanup(priv);
+ mlx5e_ipsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5940,16 +5949,16 @@ static int mlx5e_probe(struct auxiliary_device *adev,
goto err_profile_cleanup;
}
+ SET_NETDEV_DEVLINK_PORT(netdev, mlx5e_devlink_get_dl_port(priv));
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
goto err_resume;
}
- mlx5e_devlink_port_type_eth_set(priv);
-
mlx5e_dcbnl_init_app(priv);
mlx5_uplink_netdev_set(mdev, netdev);
+ mlx5e_params_print_info(mdev, &priv->channels.params);
return 0;
err_resume:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 794cd8dfe9c9..623886462c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -607,15 +607,6 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
-static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_core_dev *dev = priv->mdev;
-
- return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
-}
-
static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -644,7 +635,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -761,7 +751,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int err;
priv->fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
@@ -770,10 +759,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
return -ENOMEM;
}
- err = mlx5e_ipsec_init(priv);
- if (err)
- mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
-
mlx5e_vxlan_set_netdev_info(priv);
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -783,7 +768,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
mlx5e_fs_cleanup(priv->fs);
- mlx5e_ipsec_cleanup(priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -1122,6 +1106,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
+ mlx5e_ipsec_init(priv);
+
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
@@ -1168,6 +1154,8 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5e_rep_tc_disable(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
+
+ mlx5e_ipsec_cleanup(priv);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
@@ -1253,37 +1241,20 @@ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *
{
struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
- struct devlink_port *dl_port;
- int err;
rpriv->netdev = priv->netdev;
-
- err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
- if (err)
- return err;
-
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_eth_set(dl_port, rpriv->netdev);
-
- return 0;
+ return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
}
static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
struct net_device *netdev = rpriv->netdev;
- struct devlink_port *dl_port;
- struct mlx5_core_dev *dev;
struct mlx5e_priv *priv;
priv = netdev_priv(netdev);
- dev = priv->mdev;
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_clear(dl_port);
mlx5e_netdev_attach_nic_profile(priv);
}
@@ -1326,6 +1297,11 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_cleanup_profile;
}
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
+ rpriv->rep->vport);
+ if (dl_port)
+ SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
+
err = register_netdev(netdev);
if (err) {
netdev_warn(netdev,
@@ -1334,9 +1310,6 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_eth_set(dl_port, netdev);
return 0;
err_detach_netdev:
@@ -1382,8 +1355,6 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *dev = priv->mdev;
- struct devlink_port *dl_port;
void *ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1391,9 +1362,6 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
goto free_ppriv;
}
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index a61a43fc8d5c..c8820ab22169 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -89,6 +89,25 @@ static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}
+static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ struct mlx5_cqe64 *title = &cqd->title;
+
+ memcpy(title, cqe, sizeof(struct mlx5_cqe64));
+
+ if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
+ return;
+
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
+ mpwrq_get_cqe_consumed_strides(title);
+ else
+ cqd->wqe_counter =
+ mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
+}
+
static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
struct mlx5_cqwq *wq,
u32 cqcc)
@@ -175,6 +194,38 @@ static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
cqd->title.rss_hash_result = 0;
}
+static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqwq *wq,
+ struct mlx5_cqe64 *cqe,
+ int budget_rem)
+{
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ u32 cqcc, left;
+ u32 i;
+
+ left = get_cqe_enhanced_num_mini_cqes(cqe);
+ /* Here we avoid breaking the cqe compression session in the middle
+ * in case budget is not sufficient to handle all of it. In this case
+ * we return work_done == budget_rem to give 'busy' napi indication.
+ */
+ if (unlikely(left > budget_rem))
+ return budget_rem;
+
+ cqcc = wq->cc;
+ cqd->mini_arr_idx = 0;
+ memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
+ for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
+ mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+ rq, &cqd->title);
+ }
+ wq->cc = cqcc;
+ rq->stats->cqe_compress_pkts += left;
+
+ return left;
+}
+
static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
struct mlx5_cqwq *wq,
int update_owner_only,
@@ -220,7 +271,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
rq, &cqd->title);
cqd->mini_arr_idx++;
- return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
+ return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
}
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
@@ -542,8 +593,8 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
int headroom, i;
headroom = rq->buff.headroom;
- new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
- entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
+ new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+ entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
@@ -552,7 +603,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
for (i = 0; i < entries; i++, index++) {
dma_info = &shampo->info[index];
if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KLM_ALIGNMENT))
+ MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
goto update_klm;
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
@@ -617,8 +668,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
if (!klm_entries)
return 0;
- klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
+ klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
entries_before = shampo->hd_per_wq - index;
if (unlikely(entries_before < klm_entries))
@@ -676,6 +727,17 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
};
}
+ /* Pad if needed, in case the value set to ucseg->xlt_octowords
+ * in mlx5e_build_umr_wqe() needed alignment.
+ */
+ if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
+ int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
+ rq->mpwqe.pages_per_wqe;
+
+ memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
+ sizeof(*umr_wqe->inline_mtts) * pad);
+ }
+
bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
@@ -2211,45 +2273,102 @@ mpwrq_cqe_out:
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
+ struct mlx5_cqwq *cqwq,
+ int budget_rem)
{
- struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- struct mlx5_cqwq *cqwq = &cq->wq;
- struct mlx5_cqe64 *cqe;
+ struct mlx5_cqe64 *cqe, *title_cqe = NULL;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
int work_done = 0;
- if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
- return 0;
+ cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
+ if (!cqe)
+ return work_done;
- if (rq->cqd.left) {
- work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
- if (work_done >= budget)
- goto out;
+ if (cqd->last_cqe_title &&
+ (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
+ rq->stats->cqe_compress_blks++;
+ cqd->last_cqe_title = false;
}
- cqe = mlx5_cqwq_get_cqe(cqwq);
- if (!cqe) {
- if (unlikely(work_done))
- goto out;
- return 0;
+ do {
+ if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+ if (title_cqe) {
+ mlx5e_read_enhanced_title_slot(rq, title_cqe);
+ title_cqe = NULL;
+ rq->stats->cqe_compress_blks++;
+ }
+ work_done +=
+ mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
+ budget_rem - work_done);
+ continue;
+ }
+ title_cqe = cqe;
+ mlx5_cqwq_pop(cqwq);
+
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+ rq, cqe);
+ work_done++;
+ } while (work_done < budget_rem &&
+ (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
+
+ /* the last cqe might be the title of the next poll bulk */
+ if (title_cqe) {
+ mlx5e_read_enhanced_title_slot(rq, title_cqe);
+ cqd->last_cqe_title = true;
}
- do {
+ return work_done;
+}
+
+static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
+ struct mlx5_cqwq *cqwq,
+ int budget_rem)
+{
+ struct mlx5_cqe64 *cqe;
+ int work_done = 0;
+
+ if (rq->cqd.left)
+ work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
+
+ while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
work_done +=
mlx5e_decompress_cqes_start(rq, cqwq,
- budget - work_done);
+ budget_rem - work_done);
continue;
}
mlx5_cqwq_pop(cqwq);
-
INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
rq, cqe);
- } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+ work_done++;
+ }
+
+ return work_done;
+}
+
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5_cqwq *cqwq = &cq->wq;
+ int work_done;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
+ return 0;
+
+ if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
+ work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
+ budget);
+ else
+ work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
+ budget);
+
+ if (work_done == 0)
+ return 0;
-out:
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, NULL, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 03c1841970f1..70c4ea3841d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1241,6 +1241,23 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+void mlx5e_get_link_ext_stats(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(priv->mdev, in, sz, out,
+ MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);
+
+ stats->link_down_events = MLX5_GET(ppcnt_reg, out,
+ counter_set.phys_layer_cntrs.link_down_events);
+}
+
static int fec_num_lanes(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 9f781085be47..cbc831ca646b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -126,6 +126,8 @@ void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
struct ethtool_rmon_stats *rmon,
const struct ethtool_rmon_hist_range **ranges);
+void mlx5e_get_link_ext_stats(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats);
/* Concrete NIC Stats */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index bd9936af4582..227fa6ef9e41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -132,6 +132,15 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
+struct mlx5e_tc_jump_state {
+ u32 jump_count;
+ bool jump_target;
+ struct mlx5_flow_attr *jumping_attr;
+
+ enum flow_action_id last_id;
+ u32 last_index;
+};
+
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
struct mlx5e_tc_table *tc;
@@ -160,6 +169,7 @@ static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
+static void mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
@@ -412,8 +422,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
}
ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
- post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
- meter->red_counter);
+ post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
+ meter->act_counter, meter->drop_counter,
+ attr->branch_true, attr->branch_false);
if (IS_ERR(post_meter)) {
mlx5_core_err(priv->mdev, "Failed to init post meter\n");
goto err_meter_init;
@@ -432,9 +443,9 @@ err_meter_init:
}
static void
-mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
+mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
- mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
+ mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
mlx5e_tc_meter_put(attr->meter_attr.meter);
}
@@ -495,7 +506,7 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
if (attr->meter_attr.meter)
- mlx5e_tc_del_flow_meter(attr);
+ mlx5e_tc_del_flow_meter(esw, attr);
}
int
@@ -606,6 +617,12 @@ int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
+static struct mlx5_core_dev *
+get_flow_counter_dev(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
+}
+
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
@@ -1060,12 +1077,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&tc->hairpin_tbl_lock);
- params.log_data_size = 16;
- params.log_data_size = min_t(u8, params.log_data_size,
- MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
- params.log_data_size = max_t(u8, params.log_data_size,
- MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
-
+ params.log_data_size = clamp_t(u8, 16,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.log_num_packets = params.log_data_size -
MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
params.log_num_packets = min_t(u8, params.log_num_packets,
@@ -1722,6 +1736,90 @@ clean_encap_dests(struct mlx5e_priv *priv,
}
static int
+verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
+{
+ if (!(actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+post_process_attr(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ bool is_post_act_attr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
+ bool vf_tun;
+ int err = 0;
+
+ err = verify_attr_actions(attr->action, extack);
+ if (err)
+ goto err_out;
+
+ err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
+ if (err)
+ goto err_out;
+
+ if (mlx5e_is_eswitch_flow(flow)) {
+ err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (err)
+ goto err_out;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ if (vf_tun || is_post_act_attr) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
+ if (err)
+ goto err_out;
+ } else {
+ err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ if (attr->branch_true &&
+ attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true);
+ if (err)
+ goto err_out;
+ }
+
+ if (attr->branch_false &&
+ attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false);
+ if (err)
+ goto err_out;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
+ if (err)
+ goto err_out;
+ }
+
+err_out:
+ return err;
+}
+
+static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -1731,7 +1829,6 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
u32 max_prio, max_chain;
- bool vf_tun;
int err = 0;
parse_attr = attr->parse_attr;
@@ -1821,32 +1918,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
+ err = post_process_attr(flow, attr, false, extack);
if (err)
goto err_out;
- err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err)
- goto err_out;
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- if (vf_tun) {
- err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
- if (err)
- goto err_out;
- } else {
- err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- if (err)
- goto err_out;
- }
- }
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
- if (err)
- goto err_out;
- }
-
/* we get here if one of the following takes place:
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
@@ -1882,6 +1957,16 @@ static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
return !!geneve_tlv_opt_0_data;
}
+static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ if (!attr)
+ return;
+
+ mlx5_free_flow_attr(flow, attr);
+ kvfree(attr->parse_attr);
+ kfree(attr);
+}
+
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
@@ -1937,6 +2022,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_detach_decap(priv, flow);
free_flow_post_acts(flow);
+ free_branch_attr(flow, attr->branch_true);
+ free_branch_attr(flow, attr->branch_false);
if (flow->attr->lag.count)
mlx5_lag_del_mpesw_rule(esw->dev);
@@ -3510,36 +3597,6 @@ actions_match_supported(struct mlx5e_priv *priv,
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- if (!(actions &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
- return false;
- }
-
- if (!(~actions &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
- return false;
- }
-
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
- NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
- return false;
- }
-
- if (!(~actions &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
- return false;
- }
-
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
- NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
- return false;
- }
-
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
@@ -3639,15 +3696,12 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
attr2->esw_attr->split_count = 0;
}
+ attr2->branch_true = NULL;
+ attr2->branch_false = NULL;
+ attr2->jumping_attr = NULL;
return attr2;
}
-static struct mlx5_core_dev *
-get_flow_counter_dev(struct mlx5e_tc_flow *flow)
-{
- return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
-}
-
struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
{
@@ -3683,28 +3737,15 @@ mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
static void
free_flow_post_acts(struct mlx5e_tc_flow *flow)
{
- struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
- struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *tmp;
- bool vf_tun;
list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
if (list_is_last(&attr->list, &flow->attrs))
break;
- if (attr->post_act_handle)
- mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
-
- clean_encap_dests(flow->priv, flow, attr, &vf_tun);
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(counter_dev, attr->counter);
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- if (attr->modify_hdr)
- mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
- }
+ mlx5_free_flow_attr(flow, attr);
+ free_branch_attr(flow, attr->branch_true);
+ free_branch_attr(flow, attr->branch_false);
list_del(&attr->list);
kvfree(attr->parse_attr);
@@ -3757,7 +3798,6 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *next_attr = NULL;
struct mlx5e_post_act_handle *handle;
- bool vf_tun;
int err;
/* This is going in reverse order as needed.
@@ -3767,7 +3807,9 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (!next_attr) {
/* Set counter action on last post act rule. */
attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- } else {
+ }
+
+ if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
if (err)
goto out_free;
@@ -3779,26 +3821,14 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (list_is_last(&attr->list, &flow->attrs))
break;
- err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
+ err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
if (err)
goto out_free;
- err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
+ err = post_process_attr(flow, attr, true, extack);
if (err)
goto out_free;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
- if (err)
- goto out_free;
- }
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
- if (err)
- goto out_free;
- }
-
handle = mlx5e_tc_post_act_add(post_act, attr);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
@@ -3806,6 +3836,13 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
}
attr->post_act_handle = handle;
+
+ if (attr->jumping_attr) {
+ err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
+ if (err)
+ goto out_free;
+ }
+
next_attr = attr;
}
@@ -3825,12 +3862,145 @@ out_free:
}
static int
+alloc_branch_attr(struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_act_branch_ctrl *cond,
+ struct mlx5_flow_attr **cond_attr,
+ u32 *jump_count,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_flow_attr *attr;
+ int err = 0;
+
+ *cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
+ mlx5e_get_flow_namespace(flow));
+ if (!(*cond_attr))
+ return -ENOMEM;
+
+ attr = *cond_attr;
+
+ switch (cond->act_id) {
+ case FLOW_ACTION_DROP:
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ break;
+ case FLOW_ACTION_ACCEPT:
+ case FLOW_ACTION_PIPE:
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ break;
+ case FLOW_ACTION_JUMP:
+ if (*jump_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+ *jump_count = cond->extval;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
+ return err;
+out_err:
+ kfree(*cond_attr);
+ *cond_attr = NULL;
+ return err;
+}
+
+static void
+dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
+ struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
+ struct mlx5e_tc_jump_state *jump_state)
+{
+ if (!jump_state->jump_count)
+ return;
+
+ /* A single tc action can instantiate multiple offload actions (e.g. pedit),
+ * so count the jump only once per tc action, not per offload action.
+ */
+ if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
+ return;
+
+ jump_state->last_id = act->id;
+ jump_state->last_index = act->hw_index;
+
+ /* nothing to do for intermediate actions */
+ if (--jump_state->jump_count > 1)
+ return;
+
+ if (jump_state->jump_count == 1) { /* last action in the jump action list */
+
+ /* create a new attribute after this action */
+ jump_state->jump_target = true;
+
+ if (tc_act->is_terminating_action) { /* the branch ends here */
+ attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else { /* the branch continues executing the rest of the actions */
+ struct mlx5e_post_act *post_act;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ post_act = get_post_action(priv);
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
+ }
+ } else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
+ /* This is the post action for the jumping attribute (either red or green)
+ * Use the stored jumping_attr to set the post act id on the jumping attribute
+ */
+ attr->jumping_attr = jump_state->jumping_attr;
+ }
+}
+
+static int
+parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
+ struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_jump_state *jump_state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
+ u32 jump_count = jump_state->jump_count;
+ int err;
+
+ if (!tc_act->get_branch_ctrl)
+ return 0;
+
+ tc_act->get_branch_ctrl(act, &cond_true, &cond_false);
+
+ err = alloc_branch_attr(flow, &cond_true,
+ &attr->branch_true, &jump_count, extack);
+ if (err)
+ goto out_err;
+
+ if (jump_count)
+ jump_state->jumping_attr = attr->branch_true;
+
+ err = alloc_branch_attr(flow, &cond_false,
+ &attr->branch_false, &jump_count, extack);
+ if (err)
+ goto err_branch_false;
+
+ if (jump_count && !jump_state->jumping_attr)
+ jump_state->jumping_attr = attr->branch_false;
+
+ jump_state->jump_count = jump_count;
+ return 0;
+
+err_branch_false:
+ free_branch_attr(flow, attr->branch_true);
+out_err:
+ return err;
+}
+
+static int
parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow_action flow_action_reorder;
struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_jump_state jump_state = {};
struct mlx5_flow_attr *attr = flow->attr;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_priv *priv = flow->priv;
@@ -3850,6 +4020,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
list_add(&attr->list, &flow->attrs);
flow_action_for_each(i, _act, &flow_action_reorder) {
+ jump_state.jump_target = false;
act = *_act;
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act) {
@@ -3867,12 +4038,19 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
goto out_free;
+ dec_jump_count(act, tc_act, attr, priv, &jump_state);
+
+ err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
+ if (err)
+ goto out_free;
+
parse_state->actions |= attr->action;
/* Split attr for multi table act if not the last act. */
- if (tc_act->is_multi_table_act &&
+ if (jump_state.jump_target ||
+ (tc_act->is_multi_table_act &&
tc_act->is_multi_table_act(priv, act, attr) &&
- i < flow_action_reorder.num_entries - 1) {
+ i < flow_action_reorder.num_entries - 1)) {
err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
if (err)
goto out_free;
@@ -3954,6 +4132,10 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
+ err = verify_attr_actions(attr->action, extack);
+ if (err)
+ return err;
+
if (!actions_match_supported(priv, flow_action, parse_state->actions,
parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -4191,6 +4373,30 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
return attr;
}
+static void
+mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+ bool vf_tun;
+
+ if (!attr)
+ return;
+
+ if (attr->post_act_handle)
+ mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
+
+ clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(counter_dev, attr->counter);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+ if (attr->modify_hdr)
+ mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ }
+}
+
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct flow_cls_offload *f, unsigned long flow_flags,
@@ -4733,10 +4939,17 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
-int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+static int
+tc_matchall_police_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue");
+ return -EOPNOTSUPP;
+ }
+
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when exceed action is not drop");
@@ -4787,13 +5000,7 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
- NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not continue");
- return -EOPNOTSUPP;
- }
-
- err = mlx5e_policer_validate(flow_action, act, extack);
+ err = tc_matchall_police_validate(flow_action, act, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 48241317a535..f2677d9ca0b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -95,10 +95,13 @@ struct mlx5_flow_attr {
*/
bool count;
} lag;
+ struct mlx5_flow_attr *branch_true;
+ struct mlx5_flow_attr *branch_false;
+ struct mlx5_flow_attr *jumping_attr;
/* keep this union last */
union {
- struct mlx5_esw_flow_attr esw_attr[0];
- struct mlx5_nic_flow_attr nic_attr[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
+ DECLARE_FLEX_ARRAY(struct mlx5_nic_flow_attr, nic_attr);
};
};
@@ -110,6 +113,7 @@ enum {
MLX5_ATTR_FLAG_SAMPLE = BIT(4),
MLX5_ATTR_FLAG_ACCEPT = BIT(5),
MLX5_ATTR_FLAG_CT = BIT(6),
+ MLX5_ATTR_FLAG_TERMINATING = BIT(7),
};
/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
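The union change above relies on DECLARE_FLEX_ARRAY() because ISO C does not allow a flexible array member to be the only member of a union (or of an otherwise empty struct); the kernel macro wraps the array in an anonymous struct with an empty sibling member, so the layout is unchanged while the compiler and fortify checks are satisfied. A minimal sketch of the pattern, with illustrative names rather than the driver's:

	#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY() */

	struct example_hdr {
		u32 len;
		/* keep this union last */
		union {
			/* expands roughly to: struct { struct {} __empty_raw; u8 raw[]; }; */
			DECLARE_FLEX_ARRAY(u8, raw);
			DECLARE_FLEX_ARRAY(u32, words);
		};
	};

Allocation of such an object then typically uses struct_size(), e.g. kzalloc(struct_size(hdr, words, n), GFP_KERNEL).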
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 4fbff7bcc155..b176648d1343 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1722,7 +1722,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_debug(br_offloads->esw->dev,
- "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ "FDB update entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
fdb_info->addr, fdb_info->vid, vport_num);
return;
}
@@ -1775,9 +1775,9 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
bridge = port->bridge;
entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
- esw_warn(esw->dev,
- "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
- fdb_info->addr, fdb_info->vid, vport_num);
+ esw_debug(esw->dev,
+ "FDB remove entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ fdb_info->addr, fdb_info->vid, vport_num);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 9bc7be95db54..084a910bb4e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -91,7 +91,7 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
if (err)
goto reg_err;
- err = devl_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport, NULL);
if (err)
goto rate_err;
@@ -160,7 +160,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
if (err)
return err;
- err = devl_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport, NULL);
if (err)
goto rate_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 374e3fbdc2cf..527e4bffda8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -772,6 +772,41 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
+static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ return 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
+
+ memset(query_ctx, 0, query_out_sz);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
@@ -785,6 +820,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
+ err = mlx5_esw_vport_caps_get(esw, vport);
+ if (err)
+ goto err_caps;
+
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
@@ -804,6 +843,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
vport->info.qos, flags);
return 0;
+
+err_caps:
+ esw_vport_cleanup_acl(esw, vport);
+ return err;
}
/* Don't cleanup vport->info, it's needed to restore vport configuration */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3029bc1c0dd0..5a85a5d32be7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -153,6 +153,8 @@ struct mlx5_vport_info {
u8 qos;
u8 spoofchk: 1;
u8 trusted: 1;
+ u8 roce_enabled: 1;
+ u8 mig_enabled: 1;
};
/* Vport context events */
@@ -508,7 +510,14 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack);
-
+int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -744,6 +753,11 @@ static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
return 0;
}
+static inline struct mlx5_flow_table *
+mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.offloads.slow_fdb;
+}
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8c6c9bcb3dc3..1987a9d9d40c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -248,7 +248,7 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_ac
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+ dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}
static int
@@ -479,13 +479,15 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
- if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
- !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
- esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
- (*i)++;
- } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
esw_setup_slow_path_dest(dest, flow_act, esw, *i);
(*i)++;
+ goto out;
+ }
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
+ esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
+ (*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
@@ -506,6 +508,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
+out:
return err;
}
@@ -637,6 +640,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
+ if (!i) {
+ kfree(dest);
+ dest = NULL;
+ }
+
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
@@ -1046,7 +1054,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
if (rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
- flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
@@ -1095,7 +1103,7 @@ mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
@@ -1248,7 +1256,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
@@ -1260,7 +1268,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
@@ -1274,7 +1282,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_dev->priv.eswitch,
spec, vport->vport);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
@@ -1363,7 +1371,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -1378,7 +1386,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -1927,7 +1935,7 @@ send_vport_err:
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
- mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
+ mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
@@ -1938,7 +1946,7 @@ ns_err:
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
- if (!esw->fdb_table.offloads.slow_fdb)
+ if (!mlx5_eswitch_get_slow_fdb(esw))
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
@@ -1954,7 +1962,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_chains_destroy(esw, esw_chains(esw));
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
- mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
+ mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
@@ -3886,7 +3894,7 @@ static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num,
if (!query_ctx)
return -ENOMEM;
- err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
+ err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
if (err)
goto out_free;
@@ -4019,3 +4027,212 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
}
+
+static struct mlx5_vport *
+mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
+{
+ u16 vport_num;
+
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return mlx5_eswitch_get_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = -EOPNOTSUPP;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!MLX5_CAP_GEN(esw->dev, migration)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ if (vport->enabled) {
+ *is_enabled = vport->info.mig_enabled;
+ err = 0;
+ }
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ void *query_ctx;
+ void *hca_caps;
+ int err = -EOPNOTSUPP;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!MLX5_CAP_GEN(esw->dev, migration)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto out;
+ }
+
+ if (vport->info.mig_enabled == enable) {
+ err = 0;
+ goto out;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out_free;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
+ goto out_free;
+ }
+
+ vport->info.mig_enabled = enable;
+
+out_free:
+ kfree(query_ctx);
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = -EOPNOTSUPP;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ if (vport->enabled) {
+ *is_enabled = vport->info.roce_enabled;
+ err = 0;
+ }
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = -EOPNOTSUPP;
+ void *query_ctx;
+ void *hca_caps;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+ vport_num = vport->vport;
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto out;
+ }
+
+ if (vport->info.roce_enabled == enable) {
+ err = 0;
+ goto out;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out_free;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
+ goto out_free;
+ }
+
+ vport->info.roce_enabled = enable;
+
+out_free:
+ kfree(query_ctx);
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
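The four handlers above follow the same round trip: query the other function's HCA caps into a scratch buffer, flip a single capability bit, and write it back with the matching MLX5_SET_HCA_CAP_OP_MOD_* opmod, all under esw->state_lock. They only take effect once registered with the devlink core; assuming the devlink_ops members of this kernel generation (port_fn_roce_get/set and port_fn_migratable_get/set, which are not part of this diff), the wiring would look roughly like:

	/* Sketch only: the devlink_ops field names are assumed, not shown in this diff. */
	static const struct devlink_ops mlx5_devlink_ops = {
		/* ... existing eswitch and port callbacks ... */
		.port_fn_roce_get	= mlx5_devlink_port_fn_roce_get,
		.port_fn_roce_set	= mlx5_devlink_port_fn_roce_set,
		.port_fn_migratable_get	= mlx5_devlink_port_fn_migratable_get,
		.port_fn_migratable_set	= mlx5_devlink_port_fn_migratable_set,
	};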
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index edd910258314..3a9a6bb9158d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -210,6 +210,18 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
return (port_mask & port_value) == MLX5_VPORT_UPLINK;
}
+static bool
+mlx5_eswitch_is_push_vlan_no_cap(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act)
+{
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
+ return true;
+
+ return false;
+}
+
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
@@ -225,10 +237,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
(!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
- /* push vlan on RX */
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
- !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
- MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
+ if (mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act))
return true;
/* hairpin */
@@ -252,19 +261,31 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_act term_tbl_act = {};
struct mlx5_flow_handle *rule = NULL;
bool term_table_created = false;
+ bool is_push_vlan_on_rx;
int num_vport_dests = 0;
int i, curr_dest;
+ is_push_vlan_on_rx = mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act);
mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < num_dest; i++) {
struct mlx5_termtbl_handle *tt;
+ bool hairpin = false;
/* only vport destinations can be terminated */
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
+ if (attr->dests[num_vport_dests].rep &&
+ attr->dests[num_vport_dests].rep->vport == MLX5_VPORT_UPLINK)
+ hairpin = true;
+
+ if (!is_push_vlan_on_rx && !hairpin) {
+ num_vport_dests++;
+ continue;
+ }
+
if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
@@ -312,6 +333,9 @@ revert_changes:
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+ if (!tt)
+ continue;
+
attr->dests[curr_dest].termtbl = NULL;
/* search for the destination associated with the
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d53749248fa0..d53190f22871 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1962,6 +1962,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->fg && ft->autogroup.active)
return ERR_PTR(-EINVAL);
+ if (dest && dest_num <= 0)
+ return ERR_PTR(-EINVAL);
+
for (i = 0; i < dest_num; i++) {
if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
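The added check in _mlx5_add_flow_rules() makes the destination contract explicit: with zero destinations the caller must pass dest = NULL, which is exactly what the eswitch hunk earlier in this patch does when no destination entries were filled in. A caller-side sketch of that contract, with illustrative variable names:

	/* Sketch: keep the dest/num_dest pair consistent when nothing was added. */
	if (!num_dest) {
		kfree(dest);
		dest = NULL;
	}
	handle = mlx5_add_flow_rules(ft, spec, &flow_act, dest, num_dest);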
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 4e3a75496dd9..7c5c500fd215 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -561,12 +561,17 @@ static int mlx5i_open(struct net_device *netdev)
if (err)
goto err_remove_fs_underlay_qp;
- epriv->profile->update_rx(epriv);
+ err = epriv->profile->update_rx(epriv);
+ if (err)
+ goto err_close_channels;
+
mlx5e_activate_priv_channels(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
+err_close_channels:
+ mlx5e_close_channels(&epriv->channels);
err_remove_fs_underlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 0227a521d301..4d9c9e49645c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -221,12 +221,16 @@ static int mlx5i_pkey_open(struct net_device *netdev)
mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
goto err_clear_state_opened_flag;
}
- epriv->profile->update_rx(epriv);
+ err = epriv->profile->update_rx(epriv);
+ if (err)
+ goto err_close_channels;
mlx5e_activate_priv_channels(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
+err_close_channels:
+ mlx5e_close_channels(&epriv->channels);
err_clear_state_opened_flag:
mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_uderlay_qp:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index c971ff04dd04..0f9e4f01c85a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -334,9 +334,6 @@ err_cq:
void mlx5_aso_destroy(struct mlx5_aso *aso)
{
- if (IS_ERR_OR_NULL(aso))
- return;
-
mlx5_aso_destroy_sq(aso);
mlx5_aso_destroy_cq(&aso->cq);
kfree(aso);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d3a9ae80fd30..69cfe60c558a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -339,35 +339,25 @@ static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
-static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int neg_adj = 0;
- u32 diff;
- u64 adj;
+ u32 mult;
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjfreq_real_time(mdev, delta);
+ err = mlx5_ptp_adjfreq_real_time(mdev, scaled_ppm_to_ppb(scaled_ppm));
if (err)
return err;
- if (delta < 0) {
- neg_adj = 1;
- delta = -delta;
- }
-
- adj = timer->nominal_c_mult;
- adj *= delta;
- diff = div_u64(adj, 1000000000ULL);
+ mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&timer->tc);
- timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
- timer->nominal_c_mult + diff;
+ timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
@@ -697,7 +687,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = mlx5_ptp_adjfreq,
+ .adjfine = mlx5_ptp_adjfine,
.adjtime = mlx5_ptp_adjtime,
.gettimex64 = mlx5_ptp_gettimex,
.settime64 = mlx5_ptp_settime,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e58775a7d955..7f5db13e3550 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -37,7 +37,6 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
@@ -1306,8 +1305,15 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_sf_dev_table_create(dev);
+ err = mlx5_devlink_traps_register(priv_to_devlink(dev));
+ if (err)
+ goto err_traps_reg;
+
return 0;
+err_traps_reg:
+ mlx5_sf_dev_table_destroy(dev);
+ mlx5_sriov_detach(dev);
err_sriov:
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
@@ -1336,6 +1342,7 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_eswitch_disable(dev->priv.eswitch);
@@ -1580,14 +1587,22 @@ err:
return -ENOMEM;
}
+static int vhca_id_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+
+ seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(vhca_id);
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
int err;
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
- INIT_LIST_HEAD(&priv->ctx_list);
- spin_lock_init(&priv->ctx_lock);
lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
@@ -1604,6 +1619,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
+ debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_tout_init(dev);
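vhca_id_show() plus DEFINE_SHOW_ATTRIBUTE(vhca_id) is the standard seq_file shortcut: the macro generates an open handler that calls single_open() with inode->i_private as the show callback's data, and a matching vhca_id_fops for debugfs_create_file(), so the mlx5_core_dev pointer passed above comes back as file->private in the show function. Roughly what the macro from <linux/seq_file.h> expands to:

	static int vhca_id_open(struct inode *inode, struct file *file)
	{
		return single_open(file, vhca_id_show, inode->i_private);
	}

	static const struct file_operations vhca_id_fops = {
		.owner		= THIS_MODULE,
		.open		= vhca_id_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};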
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a806e3de7b7c..029305a8b80a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -324,7 +324,10 @@ void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
-int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
+int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 function_id,
+ u16 opmod);
+#define mlx5_vport_get_other_func_general_cap(dev, fid, out) \
+ mlx5_vport_get_other_func_cap(dev, fid, out, MLX5_CAP_GENERAL)
void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 662f1d55e30e..6bde18bcd42f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -4,6 +4,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
@@ -101,7 +102,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
goto out;
}
- ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ ret = mlx5_vport_get_other_func_general_cap(dev, function_id, query_cap);
if (ret)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
index 7df11a019df9..fe228d948b47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
@@ -15,8 +15,6 @@ int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
buddy->max_order = max_order;
INIT_LIST_HEAD(&buddy->list_node);
- INIT_LIST_HEAD(&buddy->used_list);
- INIT_LIST_HEAD(&buddy->hot_list);
buddy->bitmap = kcalloc(buddy->max_order + 1,
sizeof(*buddy->bitmap),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 16d65fe4f654..b4739eafc180 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -271,6 +271,13 @@ int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
+ /* Skip SYNC in case the device is in an internal error state.
+ * Besides a device error, this also happens when we're
+ * in fast teardown.
+ */
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return 0;
+
MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
return mlx5_cmd_exec_in(mdev, sync_steering, in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index fc6ae49b5ecc..9a9836218c8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -56,6 +56,70 @@ int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
return 0;
}
+static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
+ sizeof(struct mlx5dr_icm_chunk), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dmn->chunks_kmem_cache) {
+ mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
+ return -ENOMEM;
+ }
+
+ dmn->htbls_kmem_cache = kmem_cache_create("mlx5_dr_htbls",
+ sizeof(struct mlx5dr_ste_htbl), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dmn->htbls_kmem_cache) {
+ mlx5dr_err(dmn, "Couldn't create hash tables kmem_cache\n");
+ ret = -ENOMEM;
+ goto free_chunks_kmem_cache;
+ }
+
+ dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
+ if (!dmn->ste_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get icm memory\n");
+ ret = -ENOMEM;
+ goto free_htbls_kmem_cache;
+ }
+
+ dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
+ if (!dmn->action_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get action icm memory\n");
+ ret = -ENOMEM;
+ goto free_ste_icm_pool;
+ }
+
+ ret = mlx5dr_send_info_pool_create(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Couldn't create send info pool\n");
+ goto free_action_icm_pool;
+ }
+
+ return 0;
+
+free_action_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+free_ste_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+free_htbls_kmem_cache:
+ kmem_cache_destroy(dmn->htbls_kmem_cache);
+free_chunks_kmem_cache:
+ kmem_cache_destroy(dmn->chunks_kmem_cache);
+
+ return ret;
+}
+
+static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_send_info_pool_destroy(dmn);
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+ kmem_cache_destroy(dmn->htbls_kmem_cache);
+ kmem_cache_destroy(dmn->chunks_kmem_cache);
+}
+
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
int ret;
@@ -79,32 +143,22 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
goto clean_pd;
}
- dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
- if (!dmn->ste_icm_pool) {
- mlx5dr_err(dmn, "Couldn't get icm memory\n");
- ret = -ENOMEM;
+ ret = dr_domain_init_mem_resources(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
goto clean_uar;
}
- dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
- if (!dmn->action_icm_pool) {
- mlx5dr_err(dmn, "Couldn't get action icm memory\n");
- ret = -ENOMEM;
- goto free_ste_icm_pool;
- }
-
ret = mlx5dr_send_ring_alloc(dmn);
if (ret) {
mlx5dr_err(dmn, "Couldn't create send-ring\n");
- goto free_action_icm_pool;
+ goto clean_mem_resources;
}
return 0;
-free_action_icm_pool:
- mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
-free_ste_icm_pool:
- mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+clean_mem_resources:
+ dr_domain_uninit_mem_resources(dmn);
clean_uar:
mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
@@ -116,8 +170,7 @@ clean_pd:
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
mlx5dr_send_ring_free(dmn, dmn->send_ring);
- mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
- mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+ dr_domain_uninit_mem_resources(dmn);
mlx5_put_uars_page(dmn->mdev, dmn->uar);
mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
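dr_domain_init_mem_resources() centralizes the per-domain allocators: two kmem_cache objects for the fixed-size mlx5dr_icm_chunk and mlx5dr_ste_htbl structs (used by the chunk and hash-table changes later in this patch), the two ICM pools, and the new send-info pool, with the usual unwind order on failure. The slab-cache lifecycle itself is the ordinary create/alloc/free/destroy sequence; a minimal generic sketch with illustrative names:

	#include <linux/slab.h>

	struct example_obj {
		u64 a;
		u32 b;
	};

	static int example_cache_demo(void)
	{
		struct kmem_cache *cache;
		struct example_obj *obj;

		cache = kmem_cache_create("example_objs", sizeof(struct example_obj), 0,
					  SLAB_HWCACHE_ALIGN, NULL);
		if (!cache)
			return -ENOMEM;

		obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* cheap fixed-size allocation */
		if (obj)
			kmem_cache_free(cache, obj);		/* return the object to the cache */

		kmem_cache_destroy(cache);	/* valid only after all objects are freed */
		return 0;
	}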
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 4ca67fa24cc6..3eb6719bc8eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,14 +4,30 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
+#define DR_ICM_POOL_HOT_MEMORY_FRACTION 4
+
+struct mlx5dr_icm_hot_chunk {
+ struct mlx5dr_icm_buddy_mem *buddy_mem;
+ unsigned int seg;
+ enum mlx5dr_icm_chunk_size size;
+};
struct mlx5dr_icm_pool {
enum mlx5dr_icm_type icm_type;
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_domain *dmn;
+ struct kmem_cache *chunks_kmem_cache;
+
/* memory management */
struct mutex mutex; /* protect the ICM pool and ICM buddy */
struct list_head buddy_mem_list;
+
+ /* Hardware may be accessing this memory but at some future,
+ * undetermined time, it might cease to do so.
+ * sync_ste command sets them free.
+ */
+ struct mlx5dr_icm_hot_chunk *hot_chunks_arr;
+ u32 hot_chunks_num;
u64 hot_memory_size;
};
@@ -177,46 +193,20 @@ static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
{
+ int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ int ste_size = dr_icm_buddy_get_ste_size(buddy);
int index = offset / DR_STE_SIZE;
chunk->ste_arr = &buddy->ste_arr[index];
chunk->miss_list = &buddy->miss_list[index];
- chunk->hw_ste_arr = buddy->hw_ste_arr +
- index * dr_icm_buddy_get_ste_size(buddy);
-}
+ chunk->hw_ste_arr = buddy->hw_ste_arr + index * ste_size;
-static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
-{
- int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
- struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
-
- memset(chunk->hw_ste_arr, 0,
- num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ memset(chunk->hw_ste_arr, 0, num_of_entries * ste_size);
memset(chunk->ste_arr, 0,
num_of_entries * sizeof(chunk->ste_arr[0]));
}
-static enum mlx5dr_icm_type
-get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
-{
- return chunk->buddy_mem->pool->icm_type;
-}
-
-static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
- struct mlx5dr_icm_buddy_mem *buddy)
-{
- enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
-
- buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- list_del(&chunk->chunk_list);
-
- if (icm_type == DR_ICM_TYPE_STE)
- dr_icm_chunk_ste_cleanup(chunk);
-
- kvfree(chunk);
-}
-
static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
int num_of_entries =
@@ -296,14 +286,6 @@ free_mr:
static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
- struct mlx5dr_icm_chunk *chunk, *next;
-
- list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list)
- dr_icm_chunk_destroy(chunk, buddy);
-
- list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
- dr_icm_chunk_destroy(chunk, buddy);
-
dr_icm_pool_mr_destroy(buddy->icm_mr);
mlx5dr_buddy_cleanup(buddy);
@@ -314,53 +296,62 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
kvfree(buddy);
}
-static struct mlx5dr_icm_chunk *
-dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
- enum mlx5dr_icm_chunk_size chunk_size,
- struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
- unsigned int seg)
+static void
+dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
+ struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
+ unsigned int seg)
{
- struct mlx5dr_icm_chunk *chunk;
int offset;
- chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
- if (!chunk)
- return NULL;
-
- offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
-
chunk->seg = seg;
chunk->size = chunk_size;
chunk->buddy_mem = buddy_mem_pool;
- if (pool->icm_type == DR_ICM_TYPE_STE)
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
dr_icm_chunk_ste_init(chunk, offset);
+ }
buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- INIT_LIST_HEAD(&chunk->chunk_list);
-
- /* chunk now is part of the used_list */
- list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
-
- return chunk;
}
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
int allow_hot_size;
- /* sync when hot memory reaches half of the pool size */
+ /* sync when hot memory reaches a certain fraction of the pool size */
allow_hot_size =
mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type) / 2;
+ pool->icm_type) /
+ DR_ICM_POOL_HOT_MEMORY_FRACTION;
return pool->hot_memory_size > allow_hot_size;
}
+static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool)
+{
+ struct mlx5dr_icm_hot_chunk *hot_chunk;
+ u32 i, num_entries;
+
+ for (i = 0; i < pool->hot_chunks_num; i++) {
+ hot_chunk = &pool->hot_chunks_arr[i];
+ num_entries = mlx5dr_icm_pool_chunk_size_to_entries(hot_chunk->size);
+ mlx5dr_buddy_free_mem(hot_chunk->buddy_mem,
+ hot_chunk->seg, ilog2(num_entries));
+ hot_chunk->buddy_mem->used_memory -=
+ mlx5dr_icm_pool_chunk_size_to_byte(hot_chunk->size,
+ pool->icm_type);
+ }
+
+ pool->hot_chunks_num = 0;
+ pool->hot_memory_size = 0;
+}
+
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
- u32 num_entries;
int err;
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
@@ -369,16 +360,9 @@ static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
return err;
}
- list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
- struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
-
- list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
- num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
- mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
- pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- dr_icm_chunk_destroy(chunk, buddy);
- }
+ dr_icm_pool_clear_hot_chunks_arr(pool);
+ list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
dr_icm_buddy_destroy(buddy);
}
@@ -452,10 +436,12 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
if (ret)
goto out;
- chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
+ chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
if (!chunk)
goto out_err;
+ dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);
+
goto out;
out_err:
@@ -469,12 +455,23 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
struct mlx5dr_icm_pool *pool = buddy->pool;
+ struct mlx5dr_icm_hot_chunk *hot_chunk;
+ struct kmem_cache *chunks_cache;
+
+ chunks_cache = pool->chunks_kmem_cache;
- /* move the memory to the waiting list AKA "hot" */
+ /* move the chunk to the waiting chunks array, AKA "hot" memory */
mutex_lock(&pool->mutex);
- list_move_tail(&chunk->chunk_list, &buddy->hot_list);
+
pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
+ hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++];
+ hot_chunk->buddy_mem = chunk->buddy_mem;
+ hot_chunk->seg = chunk->seg;
+ hot_chunk->size = chunk->size;
+
+ kmem_cache_free(chunks_cache, chunk);
+
/* Check if we have chunks that are waiting for sync-ste */
if (dr_icm_pool_is_sync_required(pool))
dr_icm_pool_sync_all_buddy_pools(pool);
@@ -482,9 +479,20 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
mutex_unlock(&pool->mutex);
}
+struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool)
+{
+ return kmem_cache_alloc(pool->dmn->htbls_kmem_cache, GFP_KERNEL);
+}
+
+void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl)
+{
+ kmem_cache_free(pool->dmn->htbls_kmem_cache, htbl);
+}
+
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type)
{
+ u32 num_of_chunks, entry_size, max_hot_size;
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_icm_pool *pool;
@@ -500,21 +508,43 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
pool->dmn = dmn;
pool->icm_type = icm_type;
pool->max_log_chunk_sz = max_log_chunk_sz;
+ pool->chunks_kmem_cache = dmn->chunks_kmem_cache;
INIT_LIST_HEAD(&pool->buddy_mem_list);
mutex_init(&pool->mutex);
+ entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);
+
+ max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) /
+ DR_ICM_POOL_HOT_MEMORY_FRACTION;
+
+ num_of_chunks = DIV_ROUND_UP(max_hot_size, entry_size) + 1;
+
+ pool->hot_chunks_arr = kvcalloc(num_of_chunks,
+ sizeof(struct mlx5dr_icm_hot_chunk),
+ GFP_KERNEL);
+ if (!pool->hot_chunks_arr)
+ goto free_pool;
+
return pool;
+
+free_pool:
+ kvfree(pool);
+ return NULL;
}
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+ dr_icm_pool_clear_hot_chunks_arr(pool);
+
list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
dr_icm_buddy_destroy(buddy);
+ kvfree(pool->hot_chunks_arr);
mutex_destroy(&pool->mutex);
kvfree(pool);
}
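The pool now tracks freed ("hot") chunks in a flat, pre-sized array instead of keeping chunk structs on per-buddy hot lists, and a steering sync is triggered once hot memory exceeds one quarter (DR_ICM_POOL_HOT_MEMORY_FRACTION) of the maximum chunk size instead of one half. The array cannot overflow before a sync fires: every chunk occupies at least entry_size bytes, so at most max_hot_size / entry_size chunks can be pending, plus one for the free that crosses the threshold. A small sizing illustration with made-up values:

	/* Illustration only: the real sizes come from the pool's chunk-size helpers. */
	static u32 hot_chunks_capacity_demo(void)
	{
		u32 entry_size = 64;			/* e.g. one STE             */
		u64 max_hot_size = (1ULL << 20) / 4;	/* pool max chunk bytes / 4 */

		/* 262144 / 64 + 1 = 4097 slots; dr_icm_pool_clear_hot_chunks_arr()
		 * resets hot_chunks_num before the next threshold crossing.
		 */
		return DIV_ROUND_UP(max_hot_size, entry_size) + 1;
	}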
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 91ff19f67695..7879991048ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -3,13 +3,16 @@
#include "dr_types.h"
-#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
+#define DR_RULE_MAX_STES_OPTIMIZED 5
+#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
-static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
+static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
+ enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info_last;
struct mlx5dr_ste *last_ste;
@@ -17,7 +20,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
WARN_ON(!last_ste);
- ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
+ ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
if (!ste_info_last)
return -ENOMEM;
@@ -120,7 +123,7 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
goto out;
out:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
return ret;
}
@@ -191,8 +194,8 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
- ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
- mlx5dr_ste_get_miss_list(col_ste),
+ ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
+ new_ste, mlx5dr_ste_get_miss_list(col_ste),
update_list);
if (ret) {
mlx5dr_dbg(dmn, "Failed update dup entry\n");
@@ -278,7 +281,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_htbl->ctrl.num_of_valid_entries++;
if (use_update_list) {
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
goto err_exit;
@@ -357,6 +361,15 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
update_list);
if (err)
goto clean_copy;
+
+ /* In order to decrease the number of allocated ste_send_info
+ * structs, send the current table row now.
+ */
+ err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
+ if (err) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
+ goto clean_copy;
+ }
}
clean_copy:
@@ -387,7 +400,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
nic_matcher = nic_rule->nic_matcher;
nic_dmn = nic_matcher->nic_tbl->nic_dmn;
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
return NULL;
@@ -473,13 +487,13 @@ free_ste_list:
list_for_each_entry_safe(del_ste_info, tmp_ste_info,
&rehash_table_send_list, send_list) {
list_del(&del_ste_info->send_list);
- kfree(del_ste_info);
+ mlx5dr_send_info_free(del_ste_info);
}
free_new_htbl:
mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
mlx5dr_info(dmn, "Failed creating rehash table\n");
return NULL;
}
@@ -512,11 +526,11 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
struct list_head *send_list)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
- struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info;
struct mlx5dr_ste *new_ste;
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
return NULL;
@@ -524,8 +538,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
goto free_send_info;
- if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
- miss_list, send_list)) {
+ if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
+ new_ste, miss_list, send_list)) {
mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
goto err_exit;
}
@@ -541,7 +555,7 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
err_exit:
mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
return NULL;
}
@@ -721,8 +735,8 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
list_add_tail(&action_ste->miss_list_node,
mlx5dr_ste_get_miss_list(action_ste));
- ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
- GFP_KERNEL);
+ ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info_arr[k])
goto err_exit;
@@ -772,7 +786,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
ste->ste_chain_location = ste_location;
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
goto clean_ste_setting;
@@ -793,7 +808,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
return 0;
clean_ste_info:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
clean_ste_setting:
list_del_init(&ste->miss_list_node);
mlx5dr_htbl_put(cur_htbl);
@@ -1089,6 +1104,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
size_t num_actions,
struct mlx5dr_action *actions[])
{
+ u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
struct mlx5dr_matcher *matcher = rule->matcher;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
@@ -1098,6 +1114,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_ste_htbl *cur_htbl;
struct mlx5dr_ste *ste = NULL;
LIST_HEAD(send_ste_list);
+ bool hw_ste_arr_is_opt;
u8 *hw_ste_arr = NULL;
u32 new_hw_ste_arr_sz;
int ret, i;
@@ -1109,9 +1126,23 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
- hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr)
- return -ENOMEM;
+ ret = mlx5dr_matcher_select_builders(matcher,
+ nic_matcher,
+ dr_rule_get_ipv(&param->outer),
+ dr_rule_get_ipv(&param->inner));
+ if (ret)
+ return ret;
+
+ hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
+ if (likely(hw_ste_arr_is_opt)) {
+ hw_ste_arr = hw_ste_arr_optimized;
+ } else {
+ hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
+ DR_STE_SIZE, GFP_KERNEL);
+
+ if (!hw_ste_arr)
+ return -ENOMEM;
+ }
mlx5dr_domain_nic_lock(nic_dmn);
@@ -1119,13 +1150,6 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (ret)
goto free_hw_ste;
- ret = mlx5dr_matcher_select_builders(matcher,
- nic_matcher,
- dr_rule_get_ipv(&param->outer),
- dr_rule_get_ipv(&param->inner));
- if (ret)
- goto remove_from_nic_tbl;
-
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
if (ret)
@@ -1187,7 +1211,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
- kfree(hw_ste_arr);
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
return 0;
@@ -1196,7 +1221,7 @@ free_rule:
/* Clean all ste_info's */
list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
list_del(&ste_info->send_list);
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
}
remove_from_nic_tbl:
@@ -1205,7 +1230,10 @@ remove_from_nic_tbl:
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
- kfree(hw_ste_arr);
+
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
+
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index ef19a66f5233..a4476cb4c3b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -7,6 +7,7 @@
#define QUEUE_SIZE 128
#define SIGNAL_PER_DIV_QUEUE 16
#define TH_NUMS_TO_DRAIN 2
+#define DR_SEND_INFO_POOL_SIZE 1000
enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
@@ -49,6 +50,136 @@ struct dr_qp_init_attr {
u8 isolate_vl_tc:1;
};
+struct mlx5dr_send_info_pool_obj {
+ struct mlx5dr_ste_send_info ste_send_info;
+ struct mlx5dr_send_info_pool *pool;
+ struct list_head list_node;
+};
+
+struct mlx5dr_send_info_pool {
+ struct list_head free_list;
+};
+
+static int dr_send_info_pool_fill(struct mlx5dr_send_info_pool *pool)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
+ int i;
+
+ for (i = 0; i < DR_SEND_INFO_POOL_SIZE; i++) {
+ pool_obj = kzalloc(sizeof(*pool_obj), GFP_KERNEL);
+ if (!pool_obj)
+ goto clean_pool;
+
+ pool_obj->pool = pool;
+ list_add_tail(&pool_obj->list_node, &pool->free_list);
+ }
+
+ return 0;
+
+clean_pool:
+ list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
+ list_del(&pool_obj->list_node);
+ kfree(pool_obj);
+ }
+
+ return -ENOMEM;
+}
+
+static void dr_send_info_pool_destroy(struct mlx5dr_send_info_pool *pool)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
+
+ list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
+ list_del(&pool_obj->list_node);
+ kfree(pool_obj);
+ }
+
+ kfree(pool);
+}
+
+void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn)
+{
+ dr_send_info_pool_destroy(dmn->send_info_pool_tx);
+ dr_send_info_pool_destroy(dmn->send_info_pool_rx);
+}
+
+static struct mlx5dr_send_info_pool *dr_send_info_pool_create(void)
+{
+ struct mlx5dr_send_info_pool *pool;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ INIT_LIST_HEAD(&pool->free_list);
+
+ ret = dr_send_info_pool_fill(pool);
+ if (ret) {
+ kfree(pool);
+ return NULL;
+ }
+
+ return pool;
+}
+
+int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn)
+{
+ dmn->send_info_pool_rx = dr_send_info_pool_create();
+ if (!dmn->send_info_pool_rx)
+ return -ENOMEM;
+
+ dmn->send_info_pool_tx = dr_send_info_pool_create();
+ if (!dmn->send_info_pool_tx) {
+ dr_send_info_pool_destroy(dmn->send_info_pool_rx);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+struct mlx5dr_ste_send_info
+*mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
+ enum mlx5dr_domain_nic_type nic_type)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj;
+ struct mlx5dr_send_info_pool *pool;
+ int ret;
+
+ pool = nic_type == DR_DOMAIN_NIC_TYPE_RX ? dmn->send_info_pool_rx :
+ dmn->send_info_pool_tx;
+
+ if (unlikely(list_empty(&pool->free_list))) {
+ ret = dr_send_info_pool_fill(pool);
+ if (ret)
+ return NULL;
+ }
+
+ pool_obj = list_first_entry_or_null(&pool->free_list,
+ struct mlx5dr_send_info_pool_obj,
+ list_node);
+
+ if (likely(pool_obj)) {
+ list_del_init(&pool_obj->list_node);
+ } else {
+ WARN_ONCE(!pool_obj, "Failed getting ste send info obj from pool");
+ return NULL;
+ }
+
+ return &pool_obj->ste_send_info;
+}
+
+void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj;
+
+ pool_obj = container_of(ste_send_info,
+ struct mlx5dr_send_info_pool_obj,
+ ste_send_info);
+
+ list_add(&pool_obj->list_node, &pool_obj->pool->free_list);
+}
+
static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
{
unsigned int idx;
@@ -78,8 +209,15 @@ static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
int err;
cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
- if (!cqe64)
+ if (!cqe64) {
+ if (unlikely(dr_cq->mdev->state ==
+ MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+ mlx5_core_dbg_once(dr_cq->mdev,
+ "Polling CQ while device is shutting down\n");
+ return CQ_POLL_ERR;
+ }
return CQ_EMPTY;
+ }
mlx5_cqwq_pop(&dr_cq->wq);
err = dr_parse_cqe(dr_cq, cqe64);
@@ -833,6 +971,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cq->mcq.vector = 0;
cq->mcq.uar = uar;
+ cq->mdev = mdev;
return cq;
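For readers unfamiliar with the pattern, the dr_send.c hunks above replace per-call kzalloc()/kfree() of ste_send_info objects with per-domain RX/TX free-list pools. The standalone C sketch below models the same idea under simplified assumptions (a singly linked free list and a hypothetical pool_* API, not the mlx5 code): allocation pops the free list and refills it in batches when empty, while freeing pushes the object back instead of releasing it.

/* Minimal userspace model of the free-list object pool pattern above.
 * All names are illustrative; the driver uses struct list_head and
 * separate per-domain RX/TX pools instead.
 */
#include <stdlib.h>

struct obj {
	struct obj *next;		/* singly linked free list */
	/* payload (e.g. the send info) would live here */
};

struct pool {
	struct obj *free_list;
};

static int pool_fill(struct pool *p, int n)
{
	for (int i = 0; i < n; i++) {
		struct obj *o = calloc(1, sizeof(*o));

		if (!o)
			return -1;	/* objects already added stay usable */
		o->next = p->free_list;
		p->free_list = o;
	}
	return 0;
}

static struct obj *pool_alloc(struct pool *p)
{
	struct obj *o;

	/* refill in batches only when the pool runs dry */
	if (!p->free_list && pool_fill(p, 1000))
		return NULL;

	o = p->free_list;
	p->free_list = o->next;
	return o;
}

static void pool_free(struct pool *p, struct obj *o)
{
	/* objects are recycled onto the free list, never freed per call */
	o->next = p->free_list;
	p->free_list = o;
}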
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 09ebd3088857..9e19a8dc9022 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -491,7 +491,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
u32 num_entries;
int i;
- htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
+ htbl = mlx5dr_icm_pool_alloc_htbl(pool);
if (!htbl)
return NULL;
@@ -503,6 +503,9 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->lu_type = lu_type;
htbl->byte_mask = byte_mask;
htbl->refcount = 0;
+ htbl->pointing_ste = NULL;
+ htbl->ctrl.num_of_valid_entries = 0;
+ htbl->ctrl.num_of_collisions = 0;
num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
for (i = 0; i < num_entries; i++) {
@@ -517,17 +520,20 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
return htbl;
out_free_htbl:
- kfree(htbl);
+ mlx5dr_icm_pool_free_htbl(pool, htbl);
return NULL;
}
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
+ struct mlx5dr_icm_pool *pool = htbl->chunk->buddy_mem->pool;
+
if (htbl->refcount)
return -EBUSY;
mlx5dr_icm_free_chunk(htbl->chunk);
- kfree(htbl);
+ mlx5dr_icm_pool_free_htbl(pool, htbl);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index f68461b13391..69294a66fd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -295,7 +295,7 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
mlx5dr_dbg_tbl_del(tbl);
ret = dr_table_destroy_sw_owned_tbl(tbl);
if (ret)
- return ret;
+ mlx5dr_err(tbl->dmn, "Failed to destroy sw owned table\n");
dr_table_uninit(tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1777a1e508e7..41a37b9ac98b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -146,6 +146,8 @@ struct mlx5dr_cmd_caps;
struct mlx5dr_rule_rx_tx;
struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
+struct mlx5dr_send_info_pool;
+struct mlx5dr_icm_hot_chunk;
struct mlx5dr_ste {
/* refcount: indicates the num of rules that using this ste */
@@ -912,6 +914,10 @@ struct mlx5dr_domain {
refcount_t refcount;
struct mlx5dr_icm_pool *ste_icm_pool;
struct mlx5dr_icm_pool *action_icm_pool;
+ struct mlx5dr_send_info_pool *send_info_pool_rx;
+ struct mlx5dr_send_info_pool *send_info_pool_tx;
+ struct kmem_cache *chunks_kmem_cache;
+ struct kmem_cache *htbls_kmem_cache;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct xarray csum_fts_xa;
@@ -1105,7 +1111,6 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
- struct list_head chunk_list;
/* indicates the index of this chunk in the whole memory,
* used for deleting the chunk from the buddy
@@ -1158,6 +1163,9 @@ u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool);
+void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl);
+
static inline int
mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
{
@@ -1404,6 +1412,12 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action);
+int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
+void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
+struct mlx5dr_ste_send_info *mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
+ enum mlx5dr_domain_nic_type nic_type);
+void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info);
+
struct mlx5dr_cmd_ft_info {
u32 id;
u16 vport;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 226a0d7bb06d..84ed77763b21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -164,16 +164,9 @@ struct mlx5dr_icm_buddy_mem {
struct mlx5dr_icm_mr *icm_mr;
struct mlx5dr_icm_pool *pool;
- /* This is the list of used chunks. HW may be accessing this memory */
- struct list_head used_list;
+ /* Amount of memory in used chunks - HW may be accessing this memory */
u64 used_memory;
- /* Hardware may be accessing this memory but at some future,
- * undetermined time, it might cease to do so.
- * sync_ste command sets them free.
- */
- struct list_head hot_list;
-
/* Memory optimisation */
struct mlx5dr_ste *ste_arr;
struct list_head *miss_list;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8455e79bc44a..1513112ecec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d5c317325030..ba7e3df22413 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1160,14 +1160,40 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
-int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out,
+ u16 opmod)
{
- u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+ opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
MLX5_SET(query_hca_cap_in, in, function_id, function_id);
MLX5_SET(query_hca_cap_in, in, other_function, true);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
+EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+
+int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
+ u16 function_id, u16 opmod)
+{
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
+ void *set_ctx;
+ int ret;
+
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
+ if (!set_ctx)
+ return -ENOMEM;
+
+ MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
+ MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
+
+ kfree(set_ctx);
+ return ret;
+}
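As a usage sketch only (a hypothetical caller, not part of this patch set), the exported helper above would typically be invoked with a capability type such as MLX5_CAP_GENERAL, and the returned max-capability layout then read with the usual MLX5_ADDR_OF()/MLX5_GET() accessors; the function name and the field read below are illustrative.

/* Hypothetical caller of mlx5_vport_get_other_func_cap() (sketch only,
 * assumes the usual mlx5 headers): query another function's max general
 * caps and read a single field.
 */
static int example_read_other_func_roce(struct mlx5_core_dev *dev, u16 func_id,
					bool *roce)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_out, *hca_caps;
	int err;

	query_out = kzalloc(out_sz, GFP_KERNEL);
	if (!query_out)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(dev, func_id, query_out,
					    MLX5_CAP_GENERAL);
	if (!err) {
		hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_out, capability);
		*roce = MLX5_GET(cmd_hca_cap, hca_caps, roce);
	}

	kfree(query_out);
	return err;
}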
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 4d629e5ddbc7..e4ef1d24a3ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -243,6 +243,23 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
return cqe;
}
+static inline
+struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enahnced_comp(struct mlx5_cqwq *wq)
+{
+ u8 sw_validity_iteration_count = mlx5_cqwq_get_wrap_cnt(wq) & 0xff;
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_wqe(wq, ci);
+ if (cqe->validity_iteration_count != sw_validity_iteration_count)
+ return NULL;
+
+ /* ensure cqe content is read after cqe ownership bit/validity byte */
+ dma_rmb();
+
+ return cqe;
+}
+
static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
return (u32)wq->fbc.sz_m1 + 1;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e2a985ec2c76..a0a06e2eff82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1459,11 +1459,6 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
char buf[32];
int err;
- err = devlink_info_driver_name_put(req,
- mlxsw_core->bus_info->device_kind);
- if (err)
- return err;
-
mlxsw_reg_mgir_pack(mgir_pl);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
if (err)
@@ -3172,29 +3167,17 @@ void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv, struct net_device *dev)
-{
- struct mlxsw_core_port *mlxsw_core_port =
- &mlxsw_core->ports[local_port];
- struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
-
- mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_type_eth_set(devlink_port, dev);
-}
-EXPORT_SYMBOL(mlxsw_core_port_eth_set);
-
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv)
+void mlxsw_core_port_netdev_link(struct mlxsw_core *mlxsw_core, u16 local_port,
+ void *port_driver_priv, struct net_device *dev)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_type_clear(devlink_port);
+ SET_NETDEV_DEVLINK_PORT(dev, devlink_port);
}
-EXPORT_SYMBOL(mlxsw_core_port_clear);
+EXPORT_SYMBOL(mlxsw_core_port_netdev_link);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index ca0c3d2bee6b..e0a6fcbbcb19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -264,10 +264,9 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
const unsigned char *switch_id,
unsigned char switch_id_len);
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv, struct net_device *dev);
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv);
+void mlxsw_core_port_netdev_link(struct mlxsw_core *mlxsw_core, u16 local_port,
+ void *port_driver_priv,
+ struct net_device *dev);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index f5f5f8dc3d19..2c586c2308ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -632,9 +632,9 @@ static const struct mlxsw_bus mlxsw_i2c_bus = {
.cmd_exec = mlxsw_i2c_cmd_exec,
};
-static int mlxsw_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mlxsw_i2c_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
struct mlxsw_i2c *mlxsw_i2c;
u8 status;
@@ -751,7 +751,7 @@ static void mlxsw_i2c_remove(struct i2c_client *client)
int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
{
- i2c_driver->probe = mlxsw_i2c_probe;
+ i2c_driver->probe_new = mlxsw_i2c_probe;
i2c_driver->remove = mlxsw_i2c_remove;
return i2c_add_driver(i2c_driver);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 55b3c42bb007..6b56eadd736e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -81,20 +81,9 @@ static int mlxsw_m_port_stop(struct net_device *dev)
return 0;
}
-static struct devlink_port *
-mlxsw_m_port_get_devlink_port(struct net_device *dev)
-{
- struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
- struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
-
- return mlxsw_core_port_devlink_port_get(mlxsw_m->core,
- mlxsw_m_port->local_port);
-}
-
static const struct net_device_ops mlxsw_m_port_netdev_ops = {
.ndo_open = mlxsw_m_port_open,
.ndo_stop = mlxsw_m_port_stop,
- .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
static void mlxsw_m_module_get_drvinfo(struct net_device *dev,
@@ -265,6 +254,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 slot_index,
SET_NETDEV_DEV(dev, mlxsw_m->bus_info->dev);
dev_net_set(dev, mlxsw_core_net(mlxsw_m->core));
mlxsw_m_port = netdev_priv(dev);
+ mlxsw_core_port_netdev_link(mlxsw_m->core, local_port,
+ mlxsw_m_port, dev);
mlxsw_m_port->dev = dev;
mlxsw_m_port->mlxsw_m = mlxsw_m;
mlxsw_m_port->local_port = local_port;
@@ -298,9 +289,6 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 slot_index,
goto err_register_netdev;
}
- mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port,
- mlxsw_m_port, dev);
-
return 0;
err_register_netdev:
@@ -316,7 +304,6 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
{
struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port];
- mlxsw_core_port_clear(mlxsw_m->core, local_port, mlxsw_m);
unregister_netdev(mlxsw_m_port->dev); /* This calls ndo_stop */
mlxsw_m->ports[local_port] = NULL;
free_netdev(mlxsw_m_port->dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0777bed5bb1a..f2d6f8654e04 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2046,6 +2046,39 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u16 local_port,
}
}
+/* SPFSR - Switch Port FDB Security Register
+ * -----------------------------------------
+ * Configures the security mode per port.
+ */
+#define MLXSW_REG_SPFSR_ID 0x2023
+#define MLXSW_REG_SPFSR_LEN 0x08
+
+MLXSW_REG_DEFINE(spfsr, MLXSW_REG_SPFSR_ID, MLXSW_REG_SPFSR_LEN);
+
+/* reg_spfsr_local_port
+ * Local port.
+ * Access: Index
+ *
+ * Note: not supported for CPU port.
+ */
+MLXSW_ITEM32_LP(reg, spfsr, 0x00, 16, 0x00, 12);
+
+/* reg_spfsr_security
+ * Security checks.
+ * 0: disabled (default)
+ * 1: enabled
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spfsr, security, 0x04, 31, 1);
+
+static inline void mlxsw_reg_spfsr_pack(char *payload, u16 local_port,
+ bool security)
+{
+ MLXSW_REG_ZERO(spfsr, payload);
+ mlxsw_reg_spfsr_local_port_set(payload, local_port);
+ mlxsw_reg_spfsr_security_set(payload, security);
+}
+
/* SPVC - Switch Port VLAN Classification Register
* -----------------------------------------------
* Configures the port to identify packets as untagged / single tagged /
@@ -4620,6 +4653,7 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8 BIT(19)
/* reg_ptys_ext_eth_proto_cap
* Extended Ethernet port supported speeds and protocols.
@@ -6315,6 +6349,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EAPOL,
__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
@@ -12760,6 +12795,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(svpe),
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
+ MLXSW_REG(spfsr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
MLXSW_REG(smpe),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5bcf5bceff71..f5b2d965d476 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -466,6 +466,24 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
return err;
}
+int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spfsr_pl[MLXSW_REG_SPFSR_LEN];
+ int err;
+
+ if (mlxsw_sp_port->security == enable)
+ return 0;
+
+ mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->security = enable;
+ return 0;
+}
+
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
switch (ethtype) {
@@ -827,12 +845,12 @@ mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
for_each_possible_cpu(i) {
p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
tx_packets = p->tx_packets;
tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -1259,16 +1277,6 @@ static int mlxsw_sp_set_features(struct net_device *dev,
return 0;
}
-static struct devlink_port *
-mlxsw_sp_port_get_devlink_port(struct net_device *dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-
- return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
- mlxsw_sp_port->local_port);
-}
-
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct ifreq *ifr)
{
@@ -1342,7 +1350,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
.ndo_eth_ioctl = mlxsw_sp_port_ioctl,
};
@@ -1651,6 +1658,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
+ mlxsw_sp_port, dev);
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
@@ -1839,8 +1848,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
goto err_register_netdev;
}
- mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
- mlxsw_sp_port, dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
@@ -1897,7 +1904,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
@@ -4754,6 +4760,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
return -EOPNOTSUPP;
}
+ if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
+ return -EOPNOTSUPP;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index c8ff2a6d7e90..bbc73324451d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -321,7 +321,8 @@ struct mlxsw_sp_port {
struct mlxsw_sp *mlxsw_sp;
u16 local_port;
u8 lagged:1,
- split:1;
+ split:1,
+ security:1;
u16 pvid;
u16 lag_id;
struct {
@@ -687,6 +688,8 @@ int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
+int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable);
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type);
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 ethtype);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index dcd79d7e2af4..472830d07ac1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1672,6 +1672,19 @@ mlxsw_sp2_mask_ethtool_400gaui_8[] = {
#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_800gaui_8[] = {
+ ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_800gaui_8)
+
#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2)
@@ -1820,6 +1833,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.speed = SPEED_400000,
.width = 8,
},
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_800gaui_8,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_800000,
+ .width = 8,
+ },
};
#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 7b01b9c20722..cbb6c75a6620 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -189,29 +189,17 @@ mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
- int neg_adj = 0;
- u32 diff;
- u64 adj;
s32 ppb;
ppb = scaled_ppm_to_ppb(scaled_ppm);
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
-
- adj = clock->nominal_c_mult;
- adj *= ppb;
- diff = div_u64(adj, NSEC_PER_SEC);
-
spin_lock_bh(&clock->lock);
timecounter_read(&clock->tc);
- clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
- clock->nominal_c_mult + diff;
+ clock->cycles.mult = adjust_by_scaled_ppm(clock->nominal_c_mult,
+ scaled_ppm);
spin_unlock_bh(&clock->lock);
- return mlxsw_sp_ptp_phc_adjfreq(&clock->common, neg_adj ? -ppb : ppb);
+ return mlxsw_sp_ptp_phc_adjfreq(&clock->common, ppb);
}
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
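For reference, adjust_by_scaled_ppm() consolidates the open-coded math removed above. Assuming the generic helper semantics (scaled_ppm is parts per million left-shifted by 16 bits), a rough equivalent looks like the sketch below; it is illustrative only and ignores the intermediate-overflow handling the real helper performs.

/* Rough equivalent of adjust_by_scaled_ppm() (illustrative sketch):
 * the multiplier changes by nominal * |scaled_ppm| / (1,000,000 << 16),
 * which matches the removed scaled_ppm_to_ppb()-based computation.
 */
static u32 adjust_by_scaled_ppm_sketch(u32 nominal_mult, long scaled_ppm)
{
	bool neg = scaled_ppm < 0;
	u64 adj = (u64)nominal_mult * (neg ? -scaled_ppm : scaled_ppm);

	adj = div64_u64(adj, 1000000ULL << 16);
	return neg ? nominal_mult - (u32)adj : nominal_mult + (u32)adj;
}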
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1290b2d3eae6..d88e62bc759f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -782,10 +782,25 @@ err_port_bridge_vlan_learning_set:
static int
mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_brport_flags flags)
+ const struct net_device *orig_dev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
{
- if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_PORT_LOCKED | BR_PORT_MAB)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported bridge port flag");
return -EINVAL;
+ }
+
+ if ((flags.mask & BR_PORT_LOCKED) && is_vlan_dev(orig_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a VLAN upper");
+ return -EINVAL;
+ }
+
+ if ((flags.mask & BR_PORT_LOCKED) && vlan_uses_dev(orig_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a bridge port that has VLAN uppers");
+ return -EINVAL;
+ }
return 0;
}
@@ -819,6 +834,13 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
+ if (flags.mask & BR_PORT_LOCKED) {
+ err = mlxsw_sp_port_security_set(mlxsw_sp_port,
+ flags.val & BR_PORT_LOCKED);
+ if (err)
+ return err;
+ }
+
if (bridge_port->bridge_device->multicast_enabled)
goto out;
@@ -1186,7 +1208,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
- attr->u.brport_flags);
+ attr->orig_dev,
+ attr->u.brport_flags,
+ extack);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
@@ -2783,6 +2807,7 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_device->ops->port_leave(bridge_device, bridge_port,
mlxsw_sp_port);
+ mlxsw_sp_port_security_set(mlxsw_sp_port, false);
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
@@ -2888,13 +2913,14 @@ static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
- struct net_device *dev, bool offloaded)
+ struct net_device *dev, bool offloaded, bool locked)
{
struct switchdev_notifier_fdb_info info = {};
info.addr = mac;
info.vid = vid;
info.offloaded = offloaded;
+ info.locked = locked;
call_switchdev_notifiers(type, dev, &info.info, NULL);
}
@@ -2941,6 +2967,12 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
evid = mlxsw_sp_port_vlan->vid;
+ if (adding && mlxsw_sp_port->security) {
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
+ vid, bridge_port->dev, false, true);
+ return;
+ }
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
@@ -2952,7 +2984,8 @@ do_fdb_op:
if (!do_notification)
return;
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
- mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
+ false);
return;
@@ -3004,6 +3037,12 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
lag_vid = mlxsw_sp_port_vlan->vid;
+ if (adding && mlxsw_sp_port->security) {
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
+ vid, bridge_port->dev, false, true);
+ return;
+ }
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -3015,7 +3054,8 @@ do_fdb_op:
if (!do_notification)
return;
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
- mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
+ false);
return;
@@ -3122,7 +3162,7 @@ static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
SWITCHDEV_FDB_DEL_TO_BRIDGE;
- mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding, false);
mlxsw_sp_fid_put(fid);
@@ -3264,7 +3304,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
&vxlan_fdb_info.info, NULL);
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
vxlan_fdb_info.eth_addr,
- fdb_info->vid, dev, true);
+ fdb_info->vid, dev, true, false);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
@@ -3359,7 +3399,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
break;
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
fdb_info->addr,
- fdb_info->vid, dev, true);
+ fdb_info->vid, dev, true, false);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
@@ -3443,7 +3483,8 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
&vxlan_fdb_info->info, NULL);
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
- vxlan_fdb_info->eth_addr, vid, dev, true);
+ vxlan_fdb_info->eth_addr, vid, dev, true,
+ false);
mlxsw_sp_fid_put(fid);
@@ -3495,7 +3536,8 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
false, false);
vid = bridge_device->ops->fid_vid(bridge_device, fid);
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
- vxlan_fdb_info->eth_addr, vid, dev, false);
+ vxlan_fdb_info->eth_addr, vid, dev, false,
+ false);
mlxsw_sp_fid_put(fid);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index f4bfdb6dab9c..899c954e0e5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -510,6 +510,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
{
.policer = MLXSW_SP_TRAP_POLICER(20, 10240, 4096),
},
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(21, 128, 128),
+ },
};
static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -628,6 +631,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
.priority = 4,
},
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(EAPOL, 21),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EAPOL,
+ .priority = 5,
+ },
};
static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
@@ -1160,6 +1168,23 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
MLXSW_SP_RXL_DISCARD(ROUTER3, L3_DISCARDS),
},
},
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(EAPOL, EAPOL, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(EAPOL, EAPOL, TRAP_TO_CPU, true),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(LOCKED_PORT, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_RXL_DIS(mlxsw_sp_rx_drop_listener, FDB_MISS,
+ TRAP_EXCEPTION_TO_CPU, false,
+ SP_L2_DISCARDS, DISCARD, SP_L2_DISCARDS),
+ MLXSW_RXL_DIS(mlxsw_sp_rx_drop_listener, FDB_MISMATCH,
+ TRAP_EXCEPTION_TO_CPU, false,
+ SP_L2_DISCARDS, DISCARD, SP_L2_DISCARDS),
+ },
+ },
};
static struct mlxsw_sp_trap_policer_item *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 8da169663bda..83477c8e6971 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -25,6 +25,8 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
+ MLXSW_TRAP_ID_FDB_MISS = 0x3A,
+ MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
MLXSW_TRAP_ID_MTUERROR = 0x52,
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index ed7a35c3ceac..24c994baad13 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -57,5 +57,6 @@ config LAN743X
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
+source "drivers/net/ethernet/microchip/vcap/Kconfig"
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 9faa41436198..bbd349264e6f 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -11,3 +11,4 @@ lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
+obj-$(CONFIG_VCAP) += vcap/
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index c739d60ee17d..2db5949b4c7e 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1190,14 +1190,12 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
}
#endif /* CONFIG_PM */
-static void lan743x_common_regs(struct net_device *dev,
- struct ethtool_regs *regs, void *p)
-
+static void lan743x_common_regs(struct net_device *dev, void *p)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
u32 *rb = p;
- memset(p, 0, (MAX_LAN743X_ETH_REGS * sizeof(u32)));
+ memset(p, 0, (MAX_LAN743X_ETH_COMMON_REGS * sizeof(u32)));
rb[ETH_PRIV_FLAGS] = adapter->flags;
rb[ETH_ID_REV] = lan743x_csr_read(adapter, ID_REV);
@@ -1220,17 +1218,164 @@ static void lan743x_common_regs(struct net_device *dev,
rb[ETH_WK_SRC] = lan743x_csr_read(adapter, MAC_WK_SRC);
}
+static void lan743x_sgmii_regs(struct net_device *dev, void *p)
+{
+ struct lan743x_adapter *adp = netdev_priv(dev);
+ u32 *rb = p;
+ u16 idx;
+ int val;
+ struct {
+ u8 id;
+ u8 dev;
+ u16 addr;
+ } regs[] = {
+ { ETH_SR_VSMMD_DEV_ID1, MDIO_MMD_VEND1, 0x0002},
+ { ETH_SR_VSMMD_DEV_ID2, MDIO_MMD_VEND1, 0x0003},
+ { ETH_SR_VSMMD_PCS_ID1, MDIO_MMD_VEND1, 0x0004},
+ { ETH_SR_VSMMD_PCS_ID2, MDIO_MMD_VEND1, 0x0005},
+ { ETH_SR_VSMMD_STS, MDIO_MMD_VEND1, 0x0008},
+ { ETH_SR_VSMMD_CTRL, MDIO_MMD_VEND1, 0x0009},
+ { ETH_SR_MII_CTRL, MDIO_MMD_VEND2, 0x0000},
+ { ETH_SR_MII_STS, MDIO_MMD_VEND2, 0x0001},
+ { ETH_SR_MII_DEV_ID1, MDIO_MMD_VEND2, 0x0002},
+ { ETH_SR_MII_DEV_ID2, MDIO_MMD_VEND2, 0x0003},
+ { ETH_SR_MII_AN_ADV, MDIO_MMD_VEND2, 0x0004},
+ { ETH_SR_MII_LP_BABL, MDIO_MMD_VEND2, 0x0005},
+ { ETH_SR_MII_EXPN, MDIO_MMD_VEND2, 0x0006},
+ { ETH_SR_MII_EXT_STS, MDIO_MMD_VEND2, 0x000F},
+ { ETH_SR_MII_TIME_SYNC_ABL, MDIO_MMD_VEND2, 0x0708},
+ { ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_LWR, MDIO_MMD_VEND2, 0x0709},
+ { ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_UPR, MDIO_MMD_VEND2, 0x070A},
+ { ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_LWR, MDIO_MMD_VEND2, 0x070B},
+ { ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_UPR, MDIO_MMD_VEND2, 0x070C},
+ { ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_LWR, MDIO_MMD_VEND2, 0x070D},
+ { ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_UPR, MDIO_MMD_VEND2, 0x070E},
+ { ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_LWR, MDIO_MMD_VEND2, 0x070F},
+ { ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_UPR, MDIO_MMD_VEND2, 0x0710},
+ { ETH_VR_MII_DIG_CTRL1, MDIO_MMD_VEND2, 0x8000},
+ { ETH_VR_MII_AN_CTRL, MDIO_MMD_VEND2, 0x8001},
+ { ETH_VR_MII_AN_INTR_STS, MDIO_MMD_VEND2, 0x8002},
+ { ETH_VR_MII_TC, MDIO_MMD_VEND2, 0x8003},
+ { ETH_VR_MII_DBG_CTRL, MDIO_MMD_VEND2, 0x8005},
+ { ETH_VR_MII_EEE_MCTRL0, MDIO_MMD_VEND2, 0x8006},
+ { ETH_VR_MII_EEE_TXTIMER, MDIO_MMD_VEND2, 0x8008},
+ { ETH_VR_MII_EEE_RXTIMER, MDIO_MMD_VEND2, 0x8009},
+ { ETH_VR_MII_LINK_TIMER_CTRL, MDIO_MMD_VEND2, 0x800A},
+ { ETH_VR_MII_EEE_MCTRL1, MDIO_MMD_VEND2, 0x800B},
+ { ETH_VR_MII_DIG_STS, MDIO_MMD_VEND2, 0x8010},
+ { ETH_VR_MII_ICG_ERRCNT1, MDIO_MMD_VEND2, 0x8011},
+ { ETH_VR_MII_GPIO, MDIO_MMD_VEND2, 0x8015},
+ { ETH_VR_MII_EEE_LPI_STATUS, MDIO_MMD_VEND2, 0x8016},
+ { ETH_VR_MII_EEE_WKERR, MDIO_MMD_VEND2, 0x8017},
+ { ETH_VR_MII_MISC_STS, MDIO_MMD_VEND2, 0x8018},
+ { ETH_VR_MII_RX_LSTS, MDIO_MMD_VEND2, 0x8020},
+ { ETH_VR_MII_GEN2_GEN4_TX_BSTCTRL0, MDIO_MMD_VEND2, 0x8038},
+ { ETH_VR_MII_GEN2_GEN4_TX_LVLCTRL0, MDIO_MMD_VEND2, 0x803A},
+ { ETH_VR_MII_GEN2_GEN4_TXGENCTRL0, MDIO_MMD_VEND2, 0x803C},
+ { ETH_VR_MII_GEN2_GEN4_TXGENCTRL1, MDIO_MMD_VEND2, 0x803D},
+ { ETH_VR_MII_GEN4_TXGENCTRL2, MDIO_MMD_VEND2, 0x803E},
+ { ETH_VR_MII_GEN2_GEN4_TX_STS, MDIO_MMD_VEND2, 0x8048},
+ { ETH_VR_MII_GEN2_GEN4_RXGENCTRL0, MDIO_MMD_VEND2, 0x8058},
+ { ETH_VR_MII_GEN2_GEN4_RXGENCTRL1, MDIO_MMD_VEND2, 0x8059},
+ { ETH_VR_MII_GEN4_RXEQ_CTRL, MDIO_MMD_VEND2, 0x805B},
+ { ETH_VR_MII_GEN4_RXLOS_CTRL0, MDIO_MMD_VEND2, 0x805D},
+ { ETH_VR_MII_GEN2_GEN4_MPLL_CTRL0, MDIO_MMD_VEND2, 0x8078},
+ { ETH_VR_MII_GEN2_GEN4_MPLL_CTRL1, MDIO_MMD_VEND2, 0x8079},
+ { ETH_VR_MII_GEN2_GEN4_MPLL_STS, MDIO_MMD_VEND2, 0x8088},
+ { ETH_VR_MII_GEN2_GEN4_LVL_CTRL, MDIO_MMD_VEND2, 0x8090},
+ { ETH_VR_MII_GEN4_MISC_CTRL2, MDIO_MMD_VEND2, 0x8093},
+ { ETH_VR_MII_GEN2_GEN4_MISC_CTRL0, MDIO_MMD_VEND2, 0x8099},
+ { ETH_VR_MII_GEN2_GEN4_MISC_CTRL1, MDIO_MMD_VEND2, 0x809A},
+ { ETH_VR_MII_SNPS_CR_CTRL, MDIO_MMD_VEND2, 0x80A0},
+ { ETH_VR_MII_SNPS_CR_ADDR, MDIO_MMD_VEND2, 0x80A1},
+ { ETH_VR_MII_SNPS_CR_DATA, MDIO_MMD_VEND2, 0x80A2},
+ { ETH_VR_MII_DIG_CTRL2, MDIO_MMD_VEND2, 0x80E1},
+ { ETH_VR_MII_DIG_ERRCNT, MDIO_MMD_VEND2, 0x80E2},
+ };
+
+ for (idx = 0; idx < ARRAY_SIZE(regs); idx++) {
+ val = lan743x_sgmii_read(adp, regs[idx].dev, regs[idx].addr);
+ if (val < 0)
+ rb[regs[idx].id] = 0xFFFF;
+ else
+ rb[regs[idx].id] = val;
+ }
+}
+
static int lan743x_get_regs_len(struct net_device *dev)
{
- return MAX_LAN743X_ETH_REGS * sizeof(u32);
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ u32 num_regs = MAX_LAN743X_ETH_COMMON_REGS;
+
+ if (adapter->is_sgmii_en)
+ num_regs += MAX_LAN743X_ETH_SGMII_REGS;
+
+ return num_regs * sizeof(u32);
}
static void lan743x_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *p)
{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ int regs_len;
+
+ regs_len = lan743x_get_regs_len(dev);
+ memset(p, 0, regs_len);
+
regs->version = LAN743X_ETH_REG_VERSION;
+ regs->len = regs_len;
+
+ lan743x_common_regs(dev, p);
+ p = (u32 *)p + MAX_LAN743X_ETH_COMMON_REGS;
- lan743x_common_regs(dev, regs, p);
+ if (adapter->is_sgmii_en) {
+ lan743x_sgmii_regs(dev, p);
+ p = (u32 *)p + MAX_LAN743X_ETH_SGMII_REGS;
+ }
+}
+
+static void lan743x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ struct lan743x_phy *phy = &adapter->phy;
+
+ if (phy->fc_request_control & FLOW_CTRL_TX)
+ pause->tx_pause = 1;
+ if (phy->fc_request_control & FLOW_CTRL_RX)
+ pause->rx_pause = 1;
+ pause->autoneg = phy->fc_autoneg;
+}
+
+static int lan743x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ struct lan743x_phy *phy = &adapter->phy;
+
+ if (!phydev)
+ return -ENODEV;
+
+ if (!phy_validate_pause(phydev, pause))
+ return -EINVAL;
+
+ phy->fc_request_control = 0;
+ if (pause->rx_pause)
+ phy->fc_request_control |= FLOW_CTRL_RX;
+
+ if (pause->tx_pause)
+ phy->fc_request_control |= FLOW_CTRL_TX;
+
+ phy->fc_autoneg = pause->autoneg;
+
+ if (pause->autoneg == AUTONEG_DISABLE)
+ lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause,
+ pause->rx_pause);
+ else
+ phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
+
+ return 0;
}
const struct ethtool_ops lan743x_ethtool_ops = {
@@ -1259,6 +1404,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_regs_len = lan743x_get_regs_len,
.get_regs = lan743x_get_regs,
+ .get_pauseparam = lan743x_get_pauseparam,
+ .set_pauseparam = lan743x_set_pauseparam,
#ifdef CONFIG_PM
.get_wol = lan743x_ethtool_get_wol,
.set_wol = lan743x_ethtool_set_wol,
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
index 7f5996a52488..267d5035b8ad 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.h
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -29,7 +29,76 @@ enum {
ETH_WK_SRC,
/* Add new registers above */
- MAX_LAN743X_ETH_REGS
+ MAX_LAN743X_ETH_COMMON_REGS
+};
+
+enum {
+ /* SGMII Register */
+ ETH_SR_VSMMD_DEV_ID1,
+ ETH_SR_VSMMD_DEV_ID2,
+ ETH_SR_VSMMD_PCS_ID1,
+ ETH_SR_VSMMD_PCS_ID2,
+ ETH_SR_VSMMD_STS,
+ ETH_SR_VSMMD_CTRL,
+ ETH_SR_MII_CTRL,
+ ETH_SR_MII_STS,
+ ETH_SR_MII_DEV_ID1,
+ ETH_SR_MII_DEV_ID2,
+ ETH_SR_MII_AN_ADV,
+ ETH_SR_MII_LP_BABL,
+ ETH_SR_MII_EXPN,
+ ETH_SR_MII_EXT_STS,
+ ETH_SR_MII_TIME_SYNC_ABL,
+ ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_TX_MAX_DLY_UPR,
+ ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_TX_MIN_DLY_UPR,
+ ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_RX_MAX_DLY_UPR,
+ ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_LWR,
+ ETH_SR_MII_TIME_SYNC_RX_MIN_DLY_UPR,
+ ETH_VR_MII_DIG_CTRL1,
+ ETH_VR_MII_AN_CTRL,
+ ETH_VR_MII_AN_INTR_STS,
+ ETH_VR_MII_TC,
+ ETH_VR_MII_DBG_CTRL,
+ ETH_VR_MII_EEE_MCTRL0,
+ ETH_VR_MII_EEE_TXTIMER,
+ ETH_VR_MII_EEE_RXTIMER,
+ ETH_VR_MII_LINK_TIMER_CTRL,
+ ETH_VR_MII_EEE_MCTRL1,
+ ETH_VR_MII_DIG_STS,
+ ETH_VR_MII_ICG_ERRCNT1,
+ ETH_VR_MII_GPIO,
+ ETH_VR_MII_EEE_LPI_STATUS,
+ ETH_VR_MII_EEE_WKERR,
+ ETH_VR_MII_MISC_STS,
+ ETH_VR_MII_RX_LSTS,
+ ETH_VR_MII_GEN2_GEN4_TX_BSTCTRL0,
+ ETH_VR_MII_GEN2_GEN4_TX_LVLCTRL0,
+ ETH_VR_MII_GEN2_GEN4_TXGENCTRL0,
+ ETH_VR_MII_GEN2_GEN4_TXGENCTRL1,
+ ETH_VR_MII_GEN4_TXGENCTRL2,
+ ETH_VR_MII_GEN2_GEN4_TX_STS,
+ ETH_VR_MII_GEN2_GEN4_RXGENCTRL0,
+ ETH_VR_MII_GEN2_GEN4_RXGENCTRL1,
+ ETH_VR_MII_GEN4_RXEQ_CTRL,
+ ETH_VR_MII_GEN4_RXLOS_CTRL0,
+ ETH_VR_MII_GEN2_GEN4_MPLL_CTRL0,
+ ETH_VR_MII_GEN2_GEN4_MPLL_CTRL1,
+ ETH_VR_MII_GEN2_GEN4_MPLL_STS,
+ ETH_VR_MII_GEN2_GEN4_LVL_CTRL,
+ ETH_VR_MII_GEN4_MISC_CTRL2,
+ ETH_VR_MII_GEN2_GEN4_MISC_CTRL0,
+ ETH_VR_MII_GEN2_GEN4_MISC_CTRL1,
+ ETH_VR_MII_SNPS_CR_CTRL,
+ ETH_VR_MII_SNPS_CR_ADDR,
+ ETH_VR_MII_SNPS_CR_DATA,
+ ETH_VR_MII_DIG_CTRL2,
+ ETH_VR_MII_DIG_ERRCNT,
+
+ /* Add new registers above */
+ MAX_LAN743X_ETH_SGMII_REGS
};
extern const struct ethtool_ops lan743x_ethtool_ops;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 50eeecba1f18..534840f9a7ca 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -939,7 +939,7 @@ static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
return ret;
}
-static int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
+int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
{
u32 mmd_access;
int ret;
@@ -1326,8 +1326,8 @@ static void lan743x_mac_close(struct lan743x_adapter *adapter)
1, 1000, 20000, 100);
}
-static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
- bool tx_enable, bool rx_enable)
+void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
+ bool tx_enable, bool rx_enable)
{
u32 flow_setting = 0;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 67877d3b6dd9..8438c3dbcf36 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1159,5 +1159,8 @@ u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter, u16 timeout);
void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
+void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
+ bool tx_enable, bool rx_enable);
+int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index da3ea905adbb..39e1066ecd5f 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -339,59 +339,18 @@ static int lan743x_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
struct lan743x_adapter *adapter =
container_of(ptp, struct lan743x_adapter, ptp);
u32 lan743x_rate_adj = 0;
- bool positive = true;
- u64 u64_delta = 0;
+ u64 u64_delta;
if ((scaled_ppm < (-LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM)) ||
scaled_ppm > LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM) {
return -EINVAL;
}
- if (scaled_ppm > 0) {
- u64_delta = (u64)scaled_ppm;
- positive = true;
- } else {
- u64_delta = (u64)(-scaled_ppm);
- positive = false;
- }
- u64_delta = (u64_delta << 19);
- lan743x_rate_adj = div_u64(u64_delta, 1000000);
-
- if (positive)
- lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
-
- lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
- lan743x_rate_adj);
-
- return 0;
-}
-
-static int lan743x_ptpci_adjfreq(struct ptp_clock_info *ptpci, s32 delta_ppb)
-{
- struct lan743x_ptp *ptp =
- container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
- struct lan743x_adapter *adapter =
- container_of(ptp, struct lan743x_adapter, ptp);
- u32 lan743x_rate_adj = 0;
- bool positive = true;
- u32 u32_delta = 0;
- u64 u64_delta = 0;
- if ((delta_ppb < (-LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB)) ||
- delta_ppb > LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB) {
- return -EINVAL;
- }
- if (delta_ppb > 0) {
- u32_delta = (u32)delta_ppb;
- positive = true;
- } else {
- u32_delta = (u32)(-delta_ppb);
- positive = false;
- }
- u64_delta = (((u64)u32_delta) << 35);
- lan743x_rate_adj = div_u64(u64_delta, 1000000000);
-
- if (positive)
- lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
+ /* diff_by_scaled_ppm returns true if the difference is negative */
+ if (diff_by_scaled_ppm(1ULL << 35, scaled_ppm, &u64_delta))
+ lan743x_rate_adj = (u32)u64_delta;
+ else
+ lan743x_rate_adj = (u32)u64_delta | PTP_CLOCK_RATE_ADJ_DIR_;
lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
lan743x_rate_adj);
@@ -1583,7 +1542,6 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
- ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
ptp->ptp_clock_info.gettime64 = lan743x_ptpci_gettime64;
ptp->ptp_clock_info.getcrosststamp = NULL;
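The lan743x adjfine conversion above leans on the companion helper: per its use here and the in-tree comment, diff_by_scaled_ppm() is taken to scale base by |scaled_ppm| / (1,000,000 << 16) into *diff and return true when scaled_ppm is negative, which is why the direction bit is ORed in only on the else branch. A rough, illustrative equivalent, again ignoring the intermediate-overflow handling of the real helper:

/* Illustrative sketch of diff_by_scaled_ppm() as used above (not the
 * kernel implementation): with base = 1ULL << 35 the result reduces to
 * |scaled_ppm| << 19 / 1,000,000, matching the removed open-coded math.
 */
static bool diff_by_scaled_ppm_sketch(u64 base, long scaled_ppm, u64 *diff)
{
	bool neg = scaled_ppm < 0;

	*diff = div64_u64(base * (neg ? -scaled_ppm : scaled_ppm),
			  1000000ULL << 16);
	return neg;
}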
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index 49e1464a4313..8bcd60f17d6d 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -7,5 +7,7 @@ config LAN966X_SWITCH
depends on BRIDGE || BRIDGE=n
select PHYLINK
select PACKING
+ select PAGE_POOL
+ select VCAP
help
This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 962f7c5f9e7d..56afd694f3c7 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -11,4 +11,9 @@ lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \
lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \
- lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o
+ lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o \
+ lan966x_xdp.o lan966x_vcap_impl.o lan966x_vcap_ag_api.o \
+ lan966x_tc_flower.o lan966x_goto.o
+
+# Provide include files
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index e6948939ccc2..5314c064ceae 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+
#include "lan966x_main.h"
static int lan966x_fdma_channel_active(struct lan966x *lan966x)
@@ -10,50 +13,39 @@ static int lan966x_fdma_channel_active(struct lan966x *lan966x)
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
struct lan966x_db *db)
{
- struct lan966x *lan966x = rx->lan966x;
- dma_addr_t dma_addr;
struct page *page;
- page = dev_alloc_pages(rx->page_order);
+ page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page))
return NULL;
- dma_addr = dma_map_page(lan966x->dev, page, 0,
- PAGE_SIZE << rx->page_order,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
- goto free_page;
-
- db->dataptr = dma_addr;
+ db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
return page;
-
-free_page:
- __free_pages(page, rx->page_order);
- return NULL;
}
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
- struct lan966x *lan966x = rx->lan966x;
- struct lan966x_rx_dcb *dcb;
- struct lan966x_db *db;
int i, j;
for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &rx->dcbs[i];
-
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- dma_unmap_single(lan966x->dev,
- (dma_addr_t)db->dataptr,
- PAGE_SIZE << rx->page_order,
- DMA_FROM_DEVICE);
- __free_pages(rx->page[i][j], rx->page_order);
- }
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
}
}
+static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
+{
+ struct page *page;
+
+ page = rx->page[rx->dcb_index][rx->db_index];
+ if (unlikely(!page))
+ return;
+
+ page_pool_recycle_direct(rx->page_pool, page);
+}
+
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
struct lan966x_rx_dcb *dcb,
u64 nextptr)
@@ -73,6 +65,41 @@ static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
rx->last_entry = dcb;
}
+static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct page_pool_params pp_params = {
+ .order = rx->page_order,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = FDMA_DCB_MAX,
+ .nid = NUMA_NO_NODE,
+ .dev = lan966x->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = XDP_PACKET_HEADROOM,
+ .max_len = rx->max_mtu -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ if (lan966x_xdp_present(lan966x))
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+ rx->page_pool = page_pool_create(&pp_params);
+
+ for (int i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port;
+
+ if (!lan966x->ports[i])
+ continue;
+
+ port = lan966x->ports[i];
+ xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
+ xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ }
+
+ return PTR_ERR_OR_ZERO(rx->page_pool);
+}
+
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
@@ -82,6 +109,9 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
int i, j;
int size;
+ if (lan966x_fdma_rx_alloc_page_pool(rx))
+ return PTR_ERR(rx->page_pool);
+
/* calculate how many pages are needed to allocate the dcbs */
size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
size = ALIGN(size, PAGE_SIZE);
@@ -116,6 +146,12 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
return 0;
}
+static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
+{
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+}
+
static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
@@ -355,11 +391,14 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct xdp_frame_bulk bq;
struct lan966x_db *db;
unsigned long flags;
bool clear = false;
int i;
+ xdp_frame_bulk_init(&bq);
+
spin_lock_irqsave(&lan966x->tx_lock, flags);
for (i = 0; i < FDMA_DCB_MAX; ++i) {
dcb_buf = &tx->dcbs_buf[i];
@@ -372,19 +411,35 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
continue;
dcb_buf->dev->stats.tx_packets++;
- dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
dcb_buf->used = false;
- dma_unmap_single(lan966x->dev,
- dcb_buf->dma_addr,
- dcb_buf->skb->len,
- DMA_TO_DEVICE);
- if (!dcb_buf->ptp)
- dev_kfree_skb_any(dcb_buf->skb);
+ if (dcb_buf->use_skb) {
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (!dcb_buf->ptp)
+ napi_consume_skb(dcb_buf->data.skb, weight);
+ } else {
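+			/* Only frames queued via ndo_xdp_xmit were mapped with
+			 * dma_map_single() and need to be unmapped; XDP_TX
+			 * frames reuse the page pool mapping.
+			 */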
+ if (dcb_buf->xdp_ndo)
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (dcb_buf->xdp_ndo)
+ xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
+ else
+ xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
+ }
clear = true;
}
+ xdp_flush_frame_bulk(&bq);
+
if (clear)
lan966x_fdma_wakeup_netdev(lan966x);
@@ -403,40 +458,61 @@ static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
return true;
}
-static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
+static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
struct lan966x *lan966x = rx->lan966x;
- u64 src_port, timestamp;
+ struct lan966x_port *port;
struct lan966x_db *db;
- struct sk_buff *skb;
struct page *page;
- /* Get the received frame and unmap it */
db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
page = rx->page[rx->dcb_index][rx->db_index];
+ if (unlikely(!page))
+ return FDMA_ERROR;
- dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
+ dma_sync_single_for_cpu(lan966x->dev,
+ (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
FDMA_DCB_STATUS_BLOCKL(db->status),
DMA_FROM_DEVICE);
+ lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
+ src_port);
+ if (WARN_ON(*src_port >= lan966x->num_phys_ports))
+ return FDMA_ERROR;
+
+ port = lan966x->ports[*src_port];
+ if (!lan966x_xdp_port_present(port))
+ return FDMA_PASS;
+
+ return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
+ u64 src_port)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+ u64 timestamp;
+
+	/* Get the received frame */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+
skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
if (unlikely(!skb))
- goto unmap_page;
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
- lan966x_ifh_get_src_port(skb->data, &src_port);
lan966x_ifh_get_timestamp(skb->data, &timestamp);
- if (WARN_ON(src_port >= lan966x->num_phys_ports))
- goto free_skb;
-
- dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
- PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
-
skb->dev = lan966x->ports[src_port]->dev;
- skb_pull(skb, IFH_LEN * sizeof(u32));
+ skb_pull(skb, IFH_LEN_BYTES);
if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
skb_trim(skb, skb->len - ETH_FCS_LEN);
@@ -457,13 +533,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
return skb;
-free_skb:
- kfree_skb(skb);
-unmap_page:
- dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
- PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- __free_pages(page, rx->page_order);
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
return NULL;
}
@@ -475,9 +546,11 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
int dcb_reload = rx->dcb_index;
struct lan966x_rx_dcb *old_dcb;
struct lan966x_db *db;
+ bool redirect = false;
struct sk_buff *skb;
struct page *page;
int counter = 0;
+ u64 src_port;
u64 nextptr;
lan966x_fdma_tx_clear_buf(lan966x, weight);
@@ -487,19 +560,36 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
if (!lan966x_fdma_rx_more_frames(rx))
break;
- skb = lan966x_fdma_rx_get_frame(rx);
+ counter++;
- rx->page[rx->dcb_index][rx->db_index] = NULL;
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
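+		/* Run the frame through XDP (if a program is attached) and
+		 * act on the verdict before building an skb for it.
+		 */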
+ switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
+ case FDMA_PASS:
+ break;
+ case FDMA_ERROR:
+ lan966x_fdma_rx_free_page(rx);
+ lan966x_fdma_rx_advance_dcb(rx);
+ goto allocate_new;
+ case FDMA_REDIRECT:
+ redirect = true;
+ fallthrough;
+ case FDMA_TX:
+ lan966x_fdma_rx_advance_dcb(rx);
+ continue;
+ case FDMA_DROP:
+ lan966x_fdma_rx_free_page(rx);
+ lan966x_fdma_rx_advance_dcb(rx);
+ continue;
+ }
+ skb = lan966x_fdma_rx_get_frame(rx, src_port);
+ lan966x_fdma_rx_advance_dcb(rx);
if (!skb)
- break;
+ goto allocate_new;
napi_gro_receive(&lan966x->napi, skb);
- counter++;
}
+allocate_new:
/* Allocate new pages and map them */
while (dcb_reload != rx->dcb_index) {
db = &rx->dcbs[dcb_reload].db[rx->db_index];
@@ -521,6 +611,9 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
if (counter < weight && napi_complete_done(napi, counter))
lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+ if (redirect)
+ xdp_do_flush();
+
return counter;
}
@@ -565,14 +658,139 @@ static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
return -1;
}
+static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
+ int next_to_use, int len,
+ dma_addr_t dma_addr)
+{
+ struct lan966x_tx_dcb *next_dcb;
+ struct lan966x_db *next_db;
+
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(len);
+}
+
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+
+ if (likely(lan966x->tx.activated)) {
+ /* Connect current dcb to the next db */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+		/* Because it is the first time, just activate it */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+	/* Move to the next dcb because this one is now the last in use */
+ tx->last_in_use = next_to_use;
+}
+
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ bool dma_map)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx *tx = &lan966x->tx;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ __be32 *ifh;
+ int ret = 0;
+
+ spin_lock(&lan966x->tx_lock);
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(port->dev);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
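+	/* dma_map is set for frames coming from ndo_xdp_xmit, which have to
+	 * be mapped for DMA here; XDP_TX frames still sit in a page pool page
+	 * and only need the existing mapping to be synced.
+	 */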
+ /* Generate new IFH */
+ if (dma_map) {
+ if (xdpf->headroom < IFH_LEN_BYTES) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ ifh = xdpf->data - IFH_LEN_BYTES;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = dma_map_single(lan966x->dev,
+ xdpf->data - IFH_LEN_BYTES,
+ xdpf->len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ xdpf->len + IFH_LEN_BYTES,
+ dma_addr);
+ } else {
+ ifh = page_address(page) + XDP_PACKET_HEADROOM;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = page_pool_get_dma_addr(page);
+ dma_sync_single_for_device(lan966x->dev,
+ dma_addr + XDP_PACKET_HEADROOM,
+ xdpf->len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ xdpf->len + IFH_LEN_BYTES,
+ dma_addr + XDP_PACKET_HEADROOM);
+ }
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->use_skb = false;
+ next_dcb_buf->data.xdpf = xdpf;
+ next_dcb_buf->xdp_ndo = dma_map;
+ next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = port->dev;
+
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
+
+out:
+ spin_unlock(&lan966x->tx_lock);
+
+ return ret;
+}
+
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
struct lan966x_tx_dcb_buf *next_dcb_buf;
- struct lan966x_tx_dcb *next_dcb, *dcb;
struct lan966x_tx *tx = &lan966x->tx;
- struct lan966x_db *next_db;
int needed_headroom;
int needed_tailroom;
dma_addr_t dma_addr;
@@ -592,7 +810,7 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
}
/* skb processing */
- needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+ needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
@@ -605,8 +823,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
}
skb_tx_timestamp(skb);
- skb_push(skb, IFH_LEN * sizeof(u32));
- memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
+ skb_push(skb, IFH_LEN_BYTES);
+ memcpy(skb->data, ifh, IFH_LEN_BYTES);
skb_put(skb, 4);
dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
@@ -618,20 +836,14 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
}
/* Setup next dcb */
- next_dcb = &tx->dcbs[next_to_use];
- next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
-
- next_db = &next_dcb->db[0];
- next_db->dataptr = dma_addr;
- next_db->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_INTR |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(skb->len);
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
- next_dcb_buf->skb = skb;
+ next_dcb_buf->use_skb = true;
+ next_dcb_buf->data.skb = skb;
+ next_dcb_buf->xdp_ndo = false;
+ next_dcb_buf->len = skb->len;
next_dcb_buf->dma_addr = dma_addr;
next_dcb_buf->used = true;
next_dcb_buf->ptp = false;
@@ -641,21 +853,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true;
- if (likely(lan966x->tx.activated)) {
- /* Connect current dcb to the next db */
- dcb = &tx->dcbs[tx->last_in_use];
- dcb->nextptr = tx->dma + (next_to_use *
- sizeof(struct lan966x_tx_dcb));
-
- lan966x_fdma_tx_reload(tx);
- } else {
- /* Because it is first time, then just activate */
- lan966x->tx.activated = true;
- lan966x_fdma_tx_activate(tx);
- }
-
- /* Move to next dcb because this last in use */
- tx->last_in_use = next_to_use;
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
return NETDEV_TX_OK;
@@ -696,6 +895,7 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
+ struct page_pool *page_pool;
dma_addr_t rx_dma;
void *rx_dcbs;
u32 size;
@@ -704,6 +904,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
/* Store these for later to free them */
rx_dma = lan966x->rx.dma;
rx_dcbs = lan966x->rx.dcbs;
+ page_pool = lan966x->rx.page_pool;
napi_synchronize(&lan966x->napi);
napi_disable(&lan966x->napi);
@@ -712,6 +913,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
lan966x_fdma_rx_disable(&lan966x->rx);
lan966x_fdma_rx_free_pages(&lan966x->rx);
lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+ lan966x->rx.max_mtu = new_mtu;
err = lan966x_fdma_rx_alloc(&lan966x->rx);
if (err)
goto restore;
@@ -721,11 +923,14 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
size = ALIGN(size, PAGE_SIZE);
dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+ page_pool_destroy(page_pool);
+
lan966x_fdma_wakeup_netdev(lan966x);
napi_enable(&lan966x->napi);
return err;
restore:
+ lan966x->rx.page_pool = page_pool;
lan966x->rx.dma = rx_dma;
lan966x->rx.dcbs = rx_dcbs;
lan966x_fdma_rx_start(&lan966x->rx);
@@ -733,21 +938,20 @@ restore:
return err;
}
-int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
+{
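+	/* Worst case RX buffer size: MTU plus the IFH, room for two VLAN
+	 * tags, the XDP headroom and the trailing skb_shared_info.
+	 */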
+ return lan966x_fdma_get_max_mtu(lan966x) +
+ IFH_LEN_BYTES +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ VLAN_HLEN * 2 +
+ XDP_PACKET_HEADROOM;
+}
+
+static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
- int max_mtu;
int err;
u32 val;
- max_mtu = lan966x_fdma_get_max_mtu(lan966x);
- max_mtu += IFH_LEN * sizeof(u32);
- max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- max_mtu += VLAN_HLEN * 2;
-
- if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
- lan966x->rx.page_order)
- return 0;
-
/* Disable the CPU port */
lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
QSYS_SW_PORT_MODE_PORT_ENA,
@@ -773,6 +977,25 @@ int lan966x_fdma_change_mtu(struct lan966x *lan966x)
return err;
}
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ if (max_mtu == lan966x->rx.max_mtu)
+ return 0;
+
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
if (lan966x->fdma_ndev)
@@ -800,6 +1023,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
lan966x->rx.lan966x = lan966x;
lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
lan966x->tx.lan966x = lan966x;
lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
lan966x->tx.last_in_use = -1;
@@ -832,5 +1056,6 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
lan966x_fdma_rx_free_pages(&lan966x->rx);
lan966x_fdma_rx_free(&lan966x->rx);
+ page_pool_destroy(lan966x->rx.page_pool);
lan966x_fdma_tx_free(&lan966x->tx);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
new file mode 100644
index 000000000000..bf0cfe24a8fc
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "vcap_api_client.h"
+
+int lan966x_goto_port_add(struct lan966x_port *port,
+ struct flow_action_entry *act,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev,
+ act->chain_index, goto_id,
+ true);
+ if (err == -EFAULT) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported goto chain");
+ return -EOPNOTSUPP;
+ }
+
+ if (err == -EADDRINUSE) {
+ NL_SET_ERR_MSG_MOD(extack, "VCAP already enabled");
+ return -EOPNOTSUPP;
+ }
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not enable VCAP lookups");
+ return err;
+ }
+
+ port->tc.goto_id = goto_id;
+
+ return 0;
+}
+
+int lan966x_goto_port_del(struct lan966x_port *port,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0,
+ goto_id, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not disable VCAP lookups");
+ return err;
+ }
+
+ port->tc.goto_id = 0;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
index ca3314789d18..f3b1e0d31826 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -8,6 +8,7 @@
*/
#define IFH_LEN 7
+#define IFH_LEN_BYTES (IFH_LEN * sizeof(u32))
/* Timestamp for frame */
#define IFH_POS_TIMESTAMP 192
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 20ee5b28f70a..cadde20505ba 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -47,6 +47,9 @@ static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */
{ TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
{ TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
+ { TARGET_VCAP, 0x18000, 1 }, /* 0xe2018000 */
+ { TARGET_VCAP + 1, 0x20000, 1 }, /* 0xe2020000 */
+ { TARGET_VCAP + 2, 0x24000, 1 }, /* 0xe2024000 */
{ TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
{ TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */
{ TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */
@@ -302,13 +305,13 @@ err:
return NETDEV_TX_BUSY;
}
-static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
}
-static void lan966x_ifh_set_port(void *ifh, u64 bypass)
+void lan966x_ifh_set_port(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
@@ -440,11 +443,22 @@ static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
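+	/* Install or remove the PTP VCAP traps before applying the
+	 * timestamping configuration; they are torn down again below if the
+	 * configuration fails.
+	 */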
+ if (cmd == SIOCSHWTSTAMP) {
+ err = lan966x_ptp_setup_traps(port, ifr);
+ if (err)
+ return err;
+ }
if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) {
switch (cmd) {
case SIOCSHWTSTAMP:
- return lan966x_ptp_hwtstamp_set(port, ifr);
+ err = lan966x_ptp_hwtstamp_set(port, ifr);
+ if (err)
+ lan966x_ptp_del_traps(port);
+
+ return err;
case SIOCGHWTSTAMP:
return lan966x_ptp_hwtstamp_get(port, ifr);
}
@@ -453,7 +467,11 @@ static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ err = phy_mii_ioctl(dev->phydev, ifr, cmd);
+ if (err && cmd == SIOCSHWTSTAMP)
+ lan966x_ptp_del_traps(port);
+
+ return err;
}
static const struct net_device_ops lan966x_port_netdev_ops = {
@@ -468,6 +486,8 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
.ndo_eth_ioctl = lan966x_port_ioctl,
.ndo_setup_tc = lan966x_tc_setup,
+ .ndo_bpf = lan966x_xdp,
+ .ndo_xdp_xmit = lan966x_xdp_xmit,
};
bool lan966x_netdevice_check(const struct net_device *dev)
@@ -694,6 +714,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (port->dev)
unregister_netdev(port->dev);
+ lan966x_xdp_port_deinit(port);
if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
lan966x_fdma_netdev_deinit(lan966x, port->dev);
@@ -760,7 +781,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC;
- dev->needed_headroom = IFH_LEN * sizeof(u32);
+ dev->needed_headroom = IFH_LEN_BYTES;
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
@@ -1136,6 +1157,9 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x->ports[p]->serdes = serdes;
lan966x_port_init(lan966x->ports[p]);
+ err = lan966x_xdp_port_init(lan966x->ports[p]);
+ if (err)
+ goto cleanup_ports;
}
lan966x_mdb_init(lan966x);
@@ -1151,8 +1175,15 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
goto cleanup_ptp;
+ err = lan966x_vcap_init(lan966x);
+ if (err)
+ goto cleanup_fdma;
+
return 0;
+cleanup_fdma:
+ lan966x_fdma_deinit(lan966x);
+
cleanup_ptp:
lan966x_ptp_deinit(lan966x);
@@ -1176,6 +1207,7 @@ static int lan966x_remove(struct platform_device *pdev)
struct lan966x *lan966x = platform_get_drvdata(pdev);
lan966x_taprio_deinit(lan966x);
+ lan966x_vcap_deinit(lan966x);
lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 4ec33999e4df..3491f1961835 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -9,6 +9,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/switchdev.h>
@@ -87,6 +88,10 @@
#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */
#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */
+#define LAN966X_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
+#define LAN966X_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
+#define LAN966X_VCAP_CID_IS2_MAX (VCAP_CID_INGRESS_STAGE2_L2 - 1) /* IS2 Max */
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -100,6 +105,29 @@ enum macaccess_entry_type {
ENTRYTYPE_MACV6,
};
+/* FDMA return action codes for checking if the frame is valid
+ * FDMA_PASS, frame is valid and can be used
+ * FDMA_ERROR, something went wrong, stop getting more frames
+ * FDMA_DROP, frame is dropped, but continue to get more frames
+ * FDMA_TX, frame is given to TX, but continue to get more frames
+ * FDMA_REDIRECT, frame is redirected to another interface, but continue to
+ *   get more frames
+ */
+enum lan966x_fdma_action {
+ FDMA_PASS = 0,
+ FDMA_ERROR,
+ FDMA_DROP,
+ FDMA_TX,
+ FDMA_REDIRECT,
+};
+
+/* Controls how PORT_MASK is applied */
+enum LAN966X_PORT_MASK_MODE {
+ LAN966X_PMM_NO_ACTION,
+ LAN966X_PMM_REPLACE,
+ LAN966X_PMM_FORWARDING,
+ LAN966X_PMM_REDIRECT,
+};
+
struct lan966x_port;
struct lan966x_db {
@@ -150,15 +178,28 @@ struct lan966x_rx {
*/
u8 page_order;
+	/* Represents the maximum frame size that can be received by the CPU.
+	 * This includes the IFH + VLAN tags + frame + skb_shared_info
+ */
+ u32 max_mtu;
+
u8 channel_id;
+
+ struct page_pool *page_pool;
};
struct lan966x_tx_dcb_buf {
- struct net_device *dev;
- struct sk_buff *skb;
dma_addr_t dma_addr;
- bool used;
- bool ptp;
+ struct net_device *dev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ } data;
+ u32 len;
+ u32 used : 1;
+ u32 ptp : 1;
+ u32 use_skb : 1;
+ u32 xdp_ndo : 1;
};
struct lan966x_tx {
@@ -271,6 +312,9 @@ struct lan966x {
struct lan966x_port *mirror_monitor;
u32 mirror_mask[2];
u32 mirror_count;
+
+ /* vcap */
+ struct vcap_control *vcap_ctrl;
};
struct lan966x_port_config {
@@ -288,6 +332,7 @@ struct lan966x_port_tc {
unsigned long police_id;
unsigned long ingress_mirror_id;
unsigned long egress_mirror_id;
+ unsigned long goto_id;
struct flow_stats police_stat;
struct flow_stats mirror_stat;
};
@@ -320,6 +365,9 @@ struct lan966x_port {
enum netdev_lag_hash hash_type;
struct lan966x_port_tc tc;
+
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
@@ -337,6 +385,8 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass);
+void lan966x_ifh_set_port(void *ifh, u64 bypass);
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
@@ -435,14 +485,21 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
u32 lan966x_ptp_get_period_ps(void);
int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int lan966x_ptp_setup_traps(struct lan966x_port *port, struct ifreq *ifr);
+int lan966x_ptp_del_traps(struct lan966x_port *port);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
+ struct xdp_frame *frame,
+ struct page *page,
+ bool dma_map);
int lan966x_fdma_change_mtu(struct lan966x *lan966x);
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
int lan966x_fdma_init(struct lan966x *lan966x);
void lan966x_fdma_deinit(struct lan966x *lan966x);
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x);
int lan966x_lag_port_join(struct lan966x_port *port,
struct net_device *brport_dev,
@@ -527,6 +584,36 @@ void lan966x_mirror_port_stats(struct lan966x_port *port,
struct flow_stats *stats,
bool ingress);
+int lan966x_xdp_port_init(struct lan966x_port *port);
+void lan966x_xdp_port_deinit(struct lan966x_port *port);
+int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int lan966x_xdp_run(struct lan966x_port *port,
+ struct page *page,
+ u32 data_len);
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags);
+bool lan966x_xdp_present(struct lan966x *lan966x);
+static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
+{
+ return !!port->xdp_prog;
+}
+
+int lan966x_vcap_init(struct lan966x *lan966x);
+void lan966x_vcap_deinit(struct lan966x *lan966x);
+
+int lan966x_tc_flower(struct lan966x_port *port,
+ struct flow_cls_offload *f);
+
+int lan966x_goto_port_add(struct lan966x_port *port,
+ struct flow_action_entry *act,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack);
+int lan966x_goto_port_del(struct lan966x_port *port,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack);
+
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
int gbase, int ginst,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index e4ac59480514..c5f9803e6e63 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -5,7 +5,6 @@
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/phy/phy.h>
-#include <linux/sfp.h>
#include "lan966x_main.h"
@@ -125,7 +124,6 @@ static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
}
const struct phylink_mac_ops lan966x_phylink_mac_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = lan966x_phylink_mac_select,
.mac_config = lan966x_phylink_mac_config,
.mac_prepare = lan966x_phylink_mac_prepare,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index e5a2bbe064f8..300fe4005919 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -3,6 +3,8 @@
#include <linux/ptp_classify.h>
#include "lan966x_main.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
#define LAN966X_MAX_PTP_ID 512
@@ -18,6 +20,17 @@
#define TOD_ACC_PIN 0x7
+/* This represents the base rule ID for the PTP rules that are added in the
+ * VCAP to trap frames to CPU. This number needs to be bigger than the maximum
+ * number of entries that can exist in the VCAP.
+ */
+#define LAN966X_VCAP_PTP_RULE_ID 1000000
+#define LAN966X_VCAP_L2_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 0)
+#define LAN966X_VCAP_IPV4_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 1)
+#define LAN966X_VCAP_IPV4_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 2)
+#define LAN966X_VCAP_IPV6_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 3)
+#define LAN966X_VCAP_IPV6_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 4)
+
enum {
PTP_PIN_ACTION_IDLE = 0,
PTP_PIN_ACTION_LOAD,
@@ -35,19 +48,228 @@ static u64 lan966x_ptp_get_nominal_value(void)
return 0x304d4873ecade305;
}
+static int lan966x_ptp_add_trap(struct lan966x_port *port,
+ int (*add_ptp_key)(struct vcap_rule *vrule,
+ struct lan966x_port*),
+ u32 rule_id,
+ u16 proto)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct vcap_rule *vrule;
+ int err;
+
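+	/* A trap rule is shared by all ports: if it already exists, only its
+	 * ingress port mask has to be updated.
+	 */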
+ vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+ if (vrule) {
+ u32 value, mask;
+
+ /* Just modify the ingress port mask and exit */
+ vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask);
+ mask &= ~BIT(port->chip_port);
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
+ value, mask);
+
+ err = vcap_mod_rule(vrule);
+ goto free_rule;
+ }
+
+ vrule = vcap_alloc_rule(lan966x->vcap_ctrl, port->dev,
+ LAN966X_VCAP_CID_IS2_L0,
+ VCAP_USER_PTP, 0, rule_id);
+ if (!vrule)
+ return -ENOMEM;
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ err = add_ptp_key(vrule, port);
+ if (err)
+ goto free_rule;
+
+ err = vcap_set_rule_set_actionset(vrule, VCAP_AFS_BASE_TYPE);
+ err |= vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
+ err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, LAN966X_PMM_REPLACE);
+ err |= vcap_val_rule(vrule, proto);
+ if (err)
+ goto free_rule;
+
+ err = vcap_add_rule(vrule);
+
+free_rule:
+ /* Free the local copy of the rule */
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_ptp_del_trap(struct lan966x_port *port,
+ u32 rule_id)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct vcap_rule *vrule;
+ u32 value, mask;
+ int err;
+
+ vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+ if (!vrule)
+ return -EEXIST;
+
+ vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, &value, &mask);
+ mask |= BIT(port->chip_port);
+
+ /* No other port requires this trap, so it is safe to remove it */
+ if (mask == GENMASK(lan966x->num_phys_ports, 0)) {
+ err = vcap_del_rule(lan966x->vcap_ctrl, port->dev, rule_id);
+ goto free_rule;
+ }
+
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, value, mask);
+ err = vcap_mod_rule(vrule);
+
+free_rule:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_ptp_add_l2_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ETYPE, ETH_P_1588, ~0);
+}
+
+static int lan966x_ptp_add_ip_event_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_EV_PORT, ~0) ||
+ vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
+}
+
+static int lan966x_ptp_add_ip_general_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_GEN_PORT, ~0) ||
+ vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
+}
+
+static int lan966x_ptp_add_l2_rule(struct lan966x_port *port)
+{
+ return lan966x_ptp_add_trap(port, lan966x_ptp_add_l2_key,
+ LAN966X_VCAP_L2_PTP_TRAP, ETH_P_ALL);
+}
+
+static int lan966x_ptp_add_ipv4_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
+ LAN966X_VCAP_IPV4_EV_PTP_TRAP, ETH_P_IP);
+ if (err)
+ return err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
+ LAN966X_VCAP_IPV4_GEN_PTP_TRAP, ETH_P_IP);
+ if (err)
+ lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_add_ipv6_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
+ LAN966X_VCAP_IPV6_EV_PTP_TRAP, ETH_P_IPV6);
+ if (err)
+ return err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
+ LAN966X_VCAP_IPV6_GEN_PTP_TRAP, ETH_P_IPV6);
+ if (err)
+ lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_del_l2_rule(struct lan966x_port *port)
+{
+ return lan966x_ptp_del_trap(port, LAN966X_VCAP_L2_PTP_TRAP);
+}
+
+static int lan966x_ptp_del_ipv4_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
+ err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_GEN_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_del_ipv6_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
+ err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_GEN_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_add_traps(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_l2_rule(port);
+ if (err)
+ goto err_l2;
+
+ err = lan966x_ptp_add_ipv4_rules(port);
+ if (err)
+ goto err_ipv4;
+
+ err = lan966x_ptp_add_ipv6_rules(port);
+ if (err)
+ goto err_ipv6;
+
+ return err;
+
+err_ipv6:
+ lan966x_ptp_del_ipv4_rules(port);
+err_ipv4:
+ lan966x_ptp_del_l2_rule(port);
+err_l2:
+ return err;
+}
+
+int lan966x_ptp_del_traps(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_l2_rule(port);
+ err |= lan966x_ptp_del_ipv4_rules(port);
+ err |= lan966x_ptp_del_ipv6_rules(port);
+
+ return err;
+}
+
+int lan966x_ptp_setup_traps(struct lan966x_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
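+	/* The VCAP traps are only needed while timestamping is enabled;
+	 * HWTSTAMP_FILTER_NONE removes them again.
+	 */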
+ if (cfg.rx_filter == HWTSTAMP_FILTER_NONE)
+ return lan966x_ptp_del_traps(port);
+ else
+ return lan966x_ptp_add_traps(port);
+}
+
int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
{
struct lan966x *lan966x = port->lan966x;
struct hwtstamp_config cfg;
struct lan966x_phc *phc;
- /* For now don't allow to run ptp on ports that are part of a bridge,
- * because in case of transparent clock the HW will still forward the
- * frames, so there would be duplicate frames
- */
- if (lan966x->bridge_mask & BIT(port->chip_port))
- return -EINVAL;
-
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index fb5087fef22e..9767b5a1c958 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -25,6 +25,7 @@ enum lan966x_target {
TARGET_QSYS = 46,
TARGET_REW = 47,
TARGET_SYS = 52,
+ TARGET_VCAP = 61,
NUM_TARGETS = 66
};
@@ -315,6 +316,69 @@ enum lan966x_target {
#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+/* ANA:PORT:VCAP_S2_CFG */
+#define ANA_VCAP_S2_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 28, 0, 1, 4)
+
+#define ANA_VCAP_S2_CFG_ISDX_ENA GENMASK(20, 19)
+#define ANA_VCAP_S2_CFG_ISDX_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ISDX_ENA, x)
+#define ANA_VCAP_S2_CFG_ISDX_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ISDX_ENA, x)
+
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA GENMASK(18, 17)
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA, x)
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA, x)
+
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA GENMASK(16, 15)
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA, x)
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA, x)
+
+#define ANA_VCAP_S2_CFG_ENA BIT(14)
+#define ANA_VCAP_S2_CFG_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ENA, x)
+#define ANA_VCAP_S2_CFG_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ENA, x)
+
+#define ANA_VCAP_S2_CFG_SNAP_DIS GENMASK(13, 12)
+#define ANA_VCAP_S2_CFG_SNAP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_SNAP_DIS, x)
+#define ANA_VCAP_S2_CFG_SNAP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_SNAP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_ARP_DIS GENMASK(11, 10)
+#define ANA_VCAP_S2_CFG_ARP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ARP_DIS, x)
+#define ANA_VCAP_S2_CFG_ARP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ARP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS GENMASK(9, 8)
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP_TCPUDP_DIS, x)
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP_TCPUDP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS GENMASK(7, 6)
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP_OTHER_DIS, x)
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP_OTHER_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP6_CFG GENMASK(5, 2)
+#define ANA_VCAP_S2_CFG_IP6_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP6_CFG, x)
+#define ANA_VCAP_S2_CFG_IP6_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP6_CFG, x)
+
+#define ANA_VCAP_S2_CFG_OAM_DIS GENMASK(1, 0)
+#define ANA_VCAP_S2_CFG_OAM_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_OAM_DIS, x)
+#define ANA_VCAP_S2_CFG_OAM_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_OAM_DIS, x)
+
/* ANA:PORT:CPU_FWD_CFG */
#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
@@ -1506,4 +1570,136 @@ enum lan966x_target {
#define SYS_RAM_INIT_RAM_INIT_GET(x)\
FIELD_GET(SYS_RAM_INIT_RAM_INIT, x)
+/* VCAP:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_UPDATE_CTRL(t) __REG(TARGET_VCAP, t, 3, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_UPDATE_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_UPDATE_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_CMD, x)
+#define VCAP_UPDATE_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_CMD, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ADDR, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_SHOT, x)
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_CLEAR_CACHE, x)
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_MV_CFG(t) __REG(TARGET_VCAP, t, 3, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_MV_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_MV_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_MV_CFG_MV_NUM_POS, x)
+#define VCAP_MV_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_MV_CFG_MV_NUM_POS, x)
+
+#define VCAP_MV_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_MV_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_MV_CFG_MV_SIZE, x)
+#define VCAP_MV_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_MV_CFG_MV_SIZE, x)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ENTRY_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_MASK_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ACTION_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_CNT_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_CNT_FW_DAT(t) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_TG_DAT(t) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_CORE_IDX(t) __REG(TARGET_VCAP, t, 3, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_CORE_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_CORE_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_CORE_IDX_CORE_IDX, x)
+#define VCAP_CORE_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_CORE_IDX_CORE_IDX, x)
+
+/* VCAP:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_CORE_MAP(t) __REG(TARGET_VCAP, t, 3, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_CORE_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_CORE_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_CORE_MAP_CORE_MAP, x)
+#define VCAP_CORE_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_CORE_MAP_CORE_MAP, x)
+
+/* VCAP:VCAP_CONST:VCAP_VER */
+#define VCAP_VER(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ENTRY_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ENTRY_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ENTRY_SWCNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ENTRY_TG_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ACTION_DEF_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ACTION_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:CNT_WIDTH */
+#define VCAP_CNT_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:CORE_CNT */
+#define VCAP_CORE_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:IF_CNT */
+#define VCAP_IF_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 36, 0, 1, 4)
+
#endif /* _LAN966X_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
index 651d5493ae55..01072121c999 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -69,6 +69,8 @@ static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSMATCHALL:
return lan966x_tc_matchall(port, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return lan966x_tc_flower(port, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
new file mode 100644
index 000000000000..ba3fa917d6b7
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+struct lan966x_tc_flower_parse_usage {
+ struct flow_cls_offload *f;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ unsigned int used_keys;
+ u16 l3_proto;
+};
+
+static int lan966x_tc_flower_handler_ethaddr_usage(struct lan966x_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
+ enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
+ struct flow_match_eth_addrs match;
+ struct vcap_u48_key smac, dmac;
+ int err = 0;
+
+ flow_rule_match_eth_addrs(st->frule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
+ vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ if (err)
+ goto out;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
+ vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->f->common.extack, "eth_addr parse error");
+ return err;
+}
+
+static int
+(*lan966x_tc_flower_handlers_usage[])(struct lan966x_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = lan966x_tc_flower_handler_ethaddr_usage,
+};
+
+static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ u16 *l3_proto)
+{
+ struct lan966x_tc_flower_parse_usage state = {
+ .f = f,
+ .vrule = vrule,
+ .l3_proto = ETH_P_ALL,
+ };
+ int err = 0;
+
+ state.frule = flow_cls_offload_flow_rule(f);
+ for (int i = 0; i < ARRAY_SIZE(lan966x_tc_flower_handlers_usage); ++i) {
+ if (!flow_rule_match_key(state.frule, i) ||
+ !lan966x_tc_flower_handlers_usage[i])
+ continue;
+
+ err = lan966x_tc_flower_handlers_usage[i](&state);
+ if (err)
+ return err;
+ }
+
+ if (l3_proto)
+ *l3_proto = state.l3_proto;
+
+ return err;
+}
+
+static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
+ struct flow_action_entry *actent, *last_actent = NULL;
+ struct flow_action *act = &rule->action;
+ u64 action_mask = 0;
+ int idx;
+
+ if (!flow_action_has_entries(act)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
+ return -EINVAL;
+ }
+
+ if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(idx, actent, act) {
+ if (action_mask & BIT(actent->id)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "More actions of the same type");
+ return -EINVAL;
+ }
+ action_mask |= BIT(actent->id);
+ last_actent = actent; /* Save last action for later check */
+ }
+
+ /* Check that last action is a goto */
+ if (last_actent->id != FLOW_ACTION_GOTO) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Last action must be 'goto'");
+ return -EINVAL;
+ }
+
+ /* Check if the goto chain is in the next lookup */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+
+ /* Catch unsupported combinations of actions */
+ if (action_mask & BIT(FLOW_ACTION_TRAP) &&
+ action_mask & BIT(FLOW_ACTION_ACCEPT)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine pass and trap action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_tc_flower_add(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct flow_action_entry *act;
+ u16 l3_proto = ETH_P_ALL;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ int err, idx;
+
+ err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl, f,
+ admin);
+ if (err)
+ return err;
+
+ vrule = vcap_alloc_rule(port->lan966x->vcap_ctrl, port->dev,
+ f->common.chain_index, VCAP_USER_TC,
+ f->common.prio, 0);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ vrule->cookie = f->cookie;
+ err = lan966x_tc_flower_use_dissectors(f, admin, vrule, &l3_proto);
+ if (err)
+ goto out;
+
+ frule = flow_cls_offload_flow_rule(f);
+
+ flow_action_for_each(idx, act, &frule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_TRAP:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ err |= vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM,
+ 0);
+ err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ LAN966X_PMM_REPLACE);
+ err |= vcap_set_rule_set_actionset(vrule,
+ VCAP_AFS_BASE_TYPE);
+ if (err)
+ goto out;
+
+ break;
+ case FLOW_ACTION_GOTO:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported TC action");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ err = vcap_val_rule(vrule, l3_proto);
+ if (err) {
+ vcap_set_tc_exterr(f, vrule);
+ goto out;
+ }
+
+ err = vcap_add_rule(vrule);
+ if (err)
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Could not add the filter");
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_tc_flower_del(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_control *vctrl;
+ int err = -ENOENT, rule_id;
+
+ vctrl = port->lan966x->vcap_ctrl;
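+	/* A single tc filter may have produced more than one VCAP rule, so
+	 * keep looking up the cookie until no rule is left.
+	 */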
+ while (true) {
+ rule_id = vcap_lookup_rule_by_cookie(vctrl, f->cookie);
+ if (rule_id <= 0)
+ break;
+
+ err = vcap_del_rule(vctrl, port->dev, rule_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Cannot delete rule");
+ break;
+ }
+ }
+
+ return err;
+}
+
+int lan966x_tc_flower(struct lan966x_port *port,
+ struct flow_cls_offload *f)
+{
+ struct vcap_admin *admin;
+
+ admin = vcap_find_admin(port->lan966x->vcap_ctrl,
+ f->common.chain_index);
+ if (!admin) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Invalid chain");
+ return -EINVAL;
+ }
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return lan966x_tc_flower_add(port, f, admin);
+ case FLOW_CLS_DESTROY:
+ return lan966x_tc_flower_del(port, f, admin);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
index 7368433b9277..a539abaad9b6 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -23,6 +23,9 @@ static int lan966x_tc_matchall_add(struct lan966x_port *port,
case FLOW_ACTION_MIRRED:
return lan966x_mirror_port_add(port, act, f->cookie,
ingress, f->common.extack);
+ case FLOW_ACTION_GOTO:
+ return lan966x_goto_port_add(port, act, f->cookie,
+ f->common.extack);
default:
NL_SET_ERR_MSG_MOD(f->common.extack,
"Unsupported action");
@@ -43,6 +46,9 @@ static int lan966x_tc_matchall_del(struct lan966x_port *port,
f->cookie == port->tc.egress_mirror_id) {
return lan966x_mirror_port_del(port, ingress,
f->common.extack);
+ } else if (f->cookie == port->tc.goto_id) {
+ return lan966x_goto_port_del(port, f->cookie,
+ f->common.extack);
} else {
NL_SET_ERR_MSG_MOD(f->common.extack,
"Unsupported action");
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
new file mode 100644
index 000000000000..928e711960e6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
@@ -0,0 +1,1608 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "lan966x_vcap_ag_api.h"
+
+/* keyfields */
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 16,
+ },
+ [VCAP_KF_L2_FRM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 155,
+ .width = 4,
+ },
+ [VCAP_KF_L2_PAYLOAD0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 8,
+ },
+ [VCAP_KF_L2_PAYLOAD2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 183,
+ .width = 3,
+ },
+};
+
+static const struct vcap_field is2_mac_llc_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_L2_LLC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 139,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_mac_snap_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SNAP] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 139,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 163,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 45,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 48,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 121,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 122,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 138,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 8,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 162,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 163,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 164,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 165,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L4_1588_DOM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 8,
+ },
+ [VCAP_KF_L4_1588_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 4,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 45,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 48,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 121,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U56,
+ .offset = 129,
+ .width = 56,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 44,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 172,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is2_oam_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_OAM_MEL_FLAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 7,
+ },
+ [VCAP_KF_OAM_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 146,
+ .width = 5,
+ },
+ [VCAP_KF_OAM_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 8,
+ },
+ [VCAP_KF_OAM_FLAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 8,
+ },
+ [VCAP_KF_OAM_MEPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 183,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 184,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_DETECTED] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 185,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_ip6_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 37,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 50,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 178,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 306,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 307,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 308,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 324,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 340,
+ .width = 8,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 348,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 349,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 350,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 351,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 352,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 353,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 354,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 355,
+ .width = 1,
+ },
+ [VCAP_KF_L4_1588_DOM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 356,
+ .width = 8,
+ },
+ [VCAP_KF_L4_1588_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 364,
+ .width = 4,
+ },
+};
+
+static const struct vcap_field is2_ip6_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 37,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 50,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 178,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 306,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 307,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U56,
+ .offset = 315,
+ .width = 56,
+ },
+};
+
+static const struct vcap_field is2_smac_sip4_keyfield[] = {
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 4,
+ .width = 48,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 52,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is2_smac_sip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 4,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 8,
+ .width = 48,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 56,
+ .width = 128,
+ },
+};
+
+/* keyfield_set */
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_MAC_LLC] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_MAC_SNAP] = {
+ .type_id = 2,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_OAM] = {
+ .type_id = 7,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_TCP_UDP] = {
+ .type_id = 0,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_OTHER] = {
+ .type_id = 1,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_SMAC_SIP4] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_SMAC_SIP6] = {
+ .type_id = 8,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_MAC_LLC] = is2_mac_llc_keyfield,
+ [VCAP_KFS_MAC_SNAP] = is2_mac_snap_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_OAM] = is2_oam_keyfield,
+ [VCAP_KFS_IP6_TCP_UDP] = is2_ip6_tcp_udp_keyfield,
+ [VCAP_KFS_IP6_OTHER] = is2_ip6_other_keyfield,
+ [VCAP_KFS_SMAC_SIP4] = is2_smac_sip4_keyfield,
+ [VCAP_KFS_SMAC_SIP6] = is2_smac_sip6_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_MAC_LLC] = ARRAY_SIZE(is2_mac_llc_keyfield),
+ [VCAP_KFS_MAC_SNAP] = ARRAY_SIZE(is2_mac_snap_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_OAM] = ARRAY_SIZE(is2_oam_keyfield),
+ [VCAP_KFS_IP6_TCP_UDP] = ARRAY_SIZE(is2_ip6_tcp_udp_keyfield),
+ [VCAP_KFS_IP6_OTHER] = ARRAY_SIZE(is2_ip6_other_keyfield),
+ [VCAP_KFS_SMAC_SIP4] = ARRAY_SIZE(is2_smac_sip4_keyfield),
+ [VCAP_KFS_SMAC_SIP6] = ARRAY_SIZE(is2_smac_sip6_keyfield),
+};
+
+/* actionfields */
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 3,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 2,
+ },
+ [VCAP_AF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 9,
+ },
+ [VCAP_AF_POLICE_VCAP_ONLY] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 8,
+ },
+ [VCAP_AF_REW_OP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 16,
+ },
+ [VCAP_AF_ISDX_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_AF_ACL_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 6,
+ },
+};
+
+static const struct vcap_field is2_smac_sip_actionfield[] = {
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 3,
+ },
+ [VCAP_AF_FWD_KILL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_AF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_AFS_SMAC_SIP] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+ [VCAP_AFS_SMAC_SIP] = is2_smac_sip_actionfield,
+};
+
+/* actionfield_set map size */
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+ [VCAP_AFS_SMAC_SIP] = ARRAY_SIZE(is2_smac_sip_actionfield),
+};
+
+/* Type Groups */
+static const struct vcap_typegroup is2_x4_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 192,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 288,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [4] = is2_x4_keyfield_set_typegroups,
+ [2] = is2_x2_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup is2_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 31,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [2] = is2_x2_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [5] = NULL,
+};
+
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_REW_OP] = "REW_OP",
+};
+
+/* VCAPs */
+const struct vcap_info lan966x_vcaps[] = {
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 64,
+ .sw_count = 4,
+ .sw_width = 96,
+ .sticky_width = 32,
+ .act_width = 31,
+ .default_cnt = 11,
+ .require_cnt_dis = 1,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics lan966x_vcap_stats = {
+ .name = "lan966x",
+ .count = 1,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
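The tables above only describe where each key or action field sits in the encoded entry: .offset is the bit position inside the entry's word stream and .width is the number of bits, while the vcap_set entries record how many of a row's four subwords one entry occupies (sw_per_item) and how many entries therefore fit in a row (sw_cnt). Below is a minimal standalone sketch of that offset/width packing, assuming nothing beyond standard C; the helper and the chosen bit order are illustrative only and not part of the VCAP API in this patch.

/*
 * Standalone illustration (not part of the patch): place a field's
 * value at { .offset, .width } inside a stream of 32-bit words.
 */
#include <stdint.h>
#include <stdio.h>

static void set_bits(uint32_t *stream, int offset, int width, uint32_t value)
{
	for (int i = 0; i < width; i++) {
		int bit = offset + i;

		if (value & (1u << i))
			stream[bit / 32] |= 1u << (bit % 32);
	}
}

int main(void)
{
	uint32_t key[12] = { 0 };

	/* VCAP_KF_8021Q_VID_CLS in the IS2 half keys: offset 27, width 12 */
	set_bits(key, 27, 12, 100);	/* classified VID 100 */
	/* VCAP_KF_8021Q_PCP_CLS: offset 40, width 3 */
	set_bits(key, 40, 3, 5);

	for (int w = 0; w < 3; w++)
		printf("key[%d] = 0x%08x\n", w, key[w]);
	return 0;
}

The real encoding, including the typegroup bits inserted at each subword boundary (see the is2_x*_keyfield_set_typegroups tables), is performed by the shared vcap library and is not reproduced here.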
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h
new file mode 100644
index 000000000000..0d8bbee73b2a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef __LAN966X_VCAP_AG_API_H__
+#define __LAN966X_VCAP_AG_API_H__
+
+#include "vcap_api.h"
+
+extern const struct vcap_info lan966x_vcaps[];
+extern const struct vcap_statistics lan966x_vcap_stats;
+
+#endif /* __LAN966X_VCAP_AG_API_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
new file mode 100644
index 000000000000..d8dc9fbb81e1
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#define STREAMSIZE (64 * 4)
+
+#define LAN966X_IS2_LOOKUPS 2
+
+enum vcap_is2_port_sel_ipv6 {
+ VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
+ VCAP_IS2_PS_IPV6_STD,
+ VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
+ VCAP_IS2_PS_IPV6_MAC_ETYPE,
+};
+
+static struct lan966x_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int tgt_inst; /* hardware instance number */
+ int lookups; /* number of lookups in this vcap type */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses */
+} lan966x_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .tgt_inst = 2,
+ .lookups = LAN966X_IS2_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_IS2_L0,
+ .last_cid = LAN966X_VCAP_CID_IS2_MAX,
+ .count = 256,
+ },
+};
+
+struct lan966x_vcap_cmd_cb {
+ struct lan966x *lan966x;
+ u32 instance;
+};
+
+static u32 lan966x_vcap_read_update_ctrl(const struct lan966x_vcap_cmd_cb *cb)
+{
+ return lan_rd(cb->lan966x, VCAP_UPDATE_CTRL(cb->instance));
+}
+
+static void lan966x_vcap_wait_update(struct lan966x *lan966x, int instance)
+{
+ const struct lan966x_vcap_cmd_cb cb = { .lan966x = lan966x,
+ .instance = instance };
+ u32 val;
+
+ readx_poll_timeout(lan966x_vcap_read_update_ctrl, &cb, val,
+ (val & VCAP_UPDATE_CTRL_UPDATE_SHOT) == 0, 10,
+ 100000);
+}
+
+static void __lan966x_vcap_range_init(struct lan966x *lan966x,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count)
+{
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(0) |
+ VCAP_MV_CFG_MV_SIZE_SET(count - 1),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT_SET(1),
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static int lan966x_vcap_cid_to_lookup(int cid)
+{
+ if (cid >= LAN966X_VCAP_CID_IS2_L1 &&
+ cid < LAN966X_VCAP_CID_IS2_MAX)
+ return 1;
+
+ return 0;
+}
+
+static int
+lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool found = false;
+ u32 val;
+
+ /* Check if the port keyset selection is enabled */
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+ if (!ANA_VCAP_S2_CFG_ENA_GET(val))
+ return -ENOENT;
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL)
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_SNAP) {
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_LLC);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_SNAP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_CFM) {
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_OAM);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << lookup)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_OTHER);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_TCP_UDP);
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ }
+
+ found = true;
+ }
+
+ if (!found)
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+
+ return 0;
+}
+
+static enum vcap_keyfield_set
+lan966x_vcap_validate_keyset(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ struct vcap_keyset_list keysetlist = {};
+ enum vcap_keyfield_set keysets[10] = {};
+ int lookup;
+ int err;
+
+ if (!kslist || kslist->cnt == 0)
+ return VCAP_KFS_NO_VALUE;
+
+ lookup = lan966x_vcap_cid_to_lookup(rule->vcap_chain_id);
+ keysetlist.max = ARRAY_SIZE(keysets);
+ keysetlist.keysets = keysets;
+ err = lan966x_vcap_is2_get_port_keysets(dev, lookup, &keysetlist,
+ l3_proto);
+ if (err)
+ return VCAP_KFS_NO_VALUE;
+
+ /* Return the first requested keyset that is also supported by the port */
+ for (int i = 0; i < kslist->cnt; ++i)
+ for (int j = 0; j < keysetlist.cnt; ++j)
+ if (kslist->keysets[i] == keysets[j])
+ return kslist->keysets[i];
+
+ return VCAP_KFS_NO_VALUE;
+}
+
+static bool lan966x_vcap_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= LAN966X_VCAP_CID_IS2_L0 &&
+ rule->vcap_chain_id < LAN966X_VCAP_CID_IS2_L1);
+}
+
+static void lan966x_vcap_add_default_fields(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ u32 value, mask;
+
+ if (vcap_rule_get_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask))
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0,
+ ~BIT(port->chip_port));
+
+ if (lan966x_vcap_is_first_chain(rule))
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+static void lan966x_vcap_cache_erase(struct vcap_admin *admin)
+{
+ memset(admin->cache.keystream, 0, STREAMSIZE);
+ memset(admin->cache.maskstream, 0, STREAMSIZE);
+ memset(admin->cache.actionstream, 0, STREAMSIZE);
+ memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
+}
+
+static void lan966x_vcap_cache_write(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 *keystr, *mskstr, *actstr;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (int i = 0; i < count; ++i) {
+ lan_wr(keystr[i] & mskstr[i], lan966x,
+ VCAP_ENTRY_DAT(admin->tgt_inst, i));
+ lan_wr(~mskstr[i], lan966x,
+ VCAP_MASK_DAT(admin->tgt_inst, i));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (int i = 0; i < count; ++i)
+ lan_wr(actstr[i], lan966x,
+ VCAP_ACTION_DAT(admin->tgt_inst, i));
+ break;
+ case VCAP_SEL_COUNTER:
+ admin->cache.sticky = admin->cache.counter > 0;
+ lan_wr(admin->cache.counter, lan966x,
+ VCAP_CNT_DAT(admin->tgt_inst, 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void lan966x_vcap_cache_read(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int instance = admin->tgt_inst;
+ u32 *keystr, *mskstr, *actstr;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (int i = 0; i < count; ++i) {
+ keystr[i] =
+ lan_rd(lan966x, VCAP_ENTRY_DAT(instance, i));
+ mskstr[i] =
+ ~lan_rd(lan966x, VCAP_MASK_DAT(instance, i));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (int i = 0; i < count; ++i)
+ actstr[i] =
+ lan_rd(lan966x, VCAP_ACTION_DAT(instance, i));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ lan_rd(lan966x, VCAP_CNT_DAT(instance, 0));
+ admin->cache.sticky = admin->cache.counter > 0;
+ }
+}
+
+static void lan966x_vcap_range_init(struct net_device *dev,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ __lan966x_vcap_range_init(lan966x, admin, addr, count);
+}
+
+static void lan966x_vcap_update(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel,
+ u32 addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool clear;
+
+ clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(0) |
+ VCAP_MV_CFG_MV_SIZE_SET(0),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT,
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static void lan966x_vcap_move(struct net_device *dev,
+ struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ enum vcap_command cmd;
+ u16 mv_num_pos;
+ u16 mv_size;
+
+ mv_size = count - 1;
+ if (offset > 0) {
+ mv_num_pos = offset - 1;
+ cmd = VCAP_CMD_MOVE_DOWN;
+ } else {
+ mv_num_pos = -offset - 1;
+ cmd = VCAP_CMD_MOVE_UP;
+ }
+
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_MV_CFG_MV_SIZE_SET(mv_size),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT,
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+
+static int lan966x_vcap_enable(struct net_device *dev,
+ struct vcap_admin *admin,
+ bool enable)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(enable),
+ ANA_VCAP_S2_CFG_ENA,
+ lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+
+ return 0;
+}
+
+static struct vcap_operations lan966x_vcap_ops = {
+ .validate_keyset = lan966x_vcap_validate_keyset,
+ .add_default_fields = lan966x_vcap_add_default_fields,
+ .cache_erase = lan966x_vcap_cache_erase,
+ .cache_write = lan966x_vcap_cache_write,
+ .cache_read = lan966x_vcap_cache_read,
+ .init = lan966x_vcap_range_init,
+ .update = lan966x_vcap_update,
+ .move = lan966x_vcap_move,
+ .port_info = lan966x_vcap_port_info,
+ .enable = lan966x_vcap_enable,
+};
+
+static void lan966x_vcap_admin_free(struct vcap_admin *admin)
+{
+ if (!admin)
+ return;
+
+ kfree(admin->cache.keystream);
+ kfree(admin->cache.maskstream);
+ kfree(admin->cache.actionstream);
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+}
+
+static struct vcap_admin *
+lan966x_vcap_admin_alloc(struct lan966x *lan966x, struct vcap_control *ctrl,
+ const struct lan966x_vcap_inst *cfg)
+{
+ struct vcap_admin *admin;
+
+ admin = kzalloc(sizeof(*admin), GFP_KERNEL);
+ if (!admin)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&admin->lock);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+
+ admin->vtype = cfg->vtype;
+ admin->vinst = 0;
+ admin->w32be = true;
+ admin->tgt_inst = cfg->tgt_inst;
+
+ admin->lookups = cfg->lookups;
+ admin->lookups_per_instance = cfg->lookups;
+
+ admin->first_cid = cfg->first_cid;
+ admin->last_cid = cfg->last_cid;
+
+ admin->cache.keystream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.maskstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.actionstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ if (!admin->cache.keystream ||
+ !admin->cache.maskstream ||
+ !admin->cache.actionstream) {
+ lan966x_vcap_admin_free(admin);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return admin;
+}
+
+static void lan966x_vcap_block_init(struct lan966x *lan966x,
+ struct vcap_admin *admin,
+ struct lan966x_vcap_inst *cfg)
+{
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+
+ lan_wr(VCAP_CORE_IDX_CORE_IDX_SET(0),
+ lan966x, VCAP_CORE_IDX(admin->tgt_inst));
+ lan_wr(VCAP_CORE_MAP_CORE_MAP_SET(1),
+ lan966x, VCAP_CORE_MAP(admin->tgt_inst));
+
+ __lan966x_vcap_range_init(lan966x, admin, admin->first_valid_addr,
+ admin->last_valid_addr -
+ admin->first_valid_addr);
+}
+
+static void lan966x_vcap_port_key_deselection(struct lan966x *lan966x,
+ struct vcap_admin *admin)
+{
+ for (int p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(0, lan966x, ANA_VCAP_S2_CFG(p));
+}
+
+int lan966x_vcap_init(struct lan966x *lan966x)
+{
+ struct lan966x_vcap_inst *cfg;
+ struct vcap_control *ctrl;
+ struct vcap_admin *admin;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->vcaps = lan966x_vcaps;
+ ctrl->stats = &lan966x_vcap_stats;
+ ctrl->ops = &lan966x_vcap_ops;
+
+ INIT_LIST_HEAD(&ctrl->list);
+ for (int i = 0; i < ARRAY_SIZE(lan966x_vcap_inst_cfg); ++i) {
+ cfg = &lan966x_vcap_inst_cfg[i];
+
+ admin = lan966x_vcap_admin_alloc(lan966x, ctrl, cfg);
+ if (IS_ERR(admin))
+ return PTR_ERR(admin);
+
+ lan966x_vcap_block_init(lan966x, admin, cfg);
+ lan966x_vcap_port_key_deselection(lan966x, admin);
+
+ list_add_tail(&admin->list, &ctrl->list);
+ }
+
+ lan966x->vcap_ctrl = ctrl;
+
+ return 0;
+}
+
+void lan966x_vcap_deinit(struct lan966x *lan966x)
+{
+ struct vcap_admin *admin, *admin_next;
+ struct vcap_control *ctrl;
+
+ ctrl = lan966x->vcap_ctrl;
+ if (!ctrl)
+ return;
+
+ list_for_each_entry_safe(admin, admin_next, &ctrl->list, list) {
+ lan966x_vcap_port_key_deselection(lan966x, admin);
+ vcap_del_rules(ctrl, admin);
+ list_del(&admin->list);
+ lan966x_vcap_admin_free(admin);
+ }
+
+ kfree(ctrl);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
new file mode 100644
index 000000000000..2e6f486ec67d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
+
+#include "lan966x_main.h"
+
+static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct bpf_prog *old_prog;
+ bool old_xdp, new_xdp;
+ int err;
+
+ if (!lan966x->fdma) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Allow to set xdp only when using fdma");
+ return -EOPNOTSUPP;
+ }
+
+ old_xdp = lan966x_xdp_present(lan966x);
+ old_prog = xchg(&port->xdp_prog, xdp->prog);
+ new_xdp = lan966x_xdp_present(lan966x);
+
+ if (old_xdp == new_xdp)
+ goto out;
+
+ err = lan966x_fdma_reload_page_pool(lan966x);
+ if (err) {
+ xchg(&port->xdp_prog, old_prog);
+ return err;
+ }
+
+out:
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return lan966x_xdp_setup(dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int nxmit = 0;
+
+ for (int i = 0; i < n; ++i) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = lan966x_fdma_xmit_xdpf(port, xdpf, NULL, true);
+ if (err)
+ break;
+
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
+{
+ struct bpf_prog *xdp_prog = port->xdp_prog;
+ struct lan966x *lan966x = port->lan966x;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 act;
+
+ xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,
+ &port->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(page),
+ IFH_LEN_BYTES + XDP_PACKET_HEADROOM,
+ data_len - IFH_LEN_BYTES, false);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return FDMA_PASS;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (!xdpf)
+ return FDMA_DROP;
+
+ return lan966x_fdma_xmit_xdpf(port, xdpf, page, false) ?
+ FDMA_DROP : FDMA_TX;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(port->dev, &xdp, xdp_prog))
+ return FDMA_DROP;
+
+ return FDMA_REDIRECT;
+ default:
+ bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(port->dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ return FDMA_DROP;
+ }
+}
+
+bool lan966x_xdp_present(struct lan966x *lan966x)
+{
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ if (lan966x_xdp_port_present(lan966x->ports[p]))
+ return true;
+ }
+
+ return false;
+}
+
+int lan966x_xdp_port_init(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ return xdp_rxq_info_reg(&port->xdp_rxq, port->dev, 0,
+ lan966x->napi.napi_id);
+}
+
+void lan966x_xdp_port_deinit(struct lan966x_port *port)
+{
+ if (xdp_rxq_info_is_reg(&port->xdp_rxq))
+ xdp_rxq_info_unreg(&port->xdp_rxq);
+}
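lan966x_xdp_run() above maps the standard XDP verdicts onto the driver's FDMA results (XDP_PASS -> FDMA_PASS, XDP_TX -> FDMA_TX, XDP_REDIRECT -> FDMA_REDIRECT, anything else -> FDMA_DROP). For reference, a minimal generic XDP program exercising two of those verdicts is sketched below; it assumes the usual clang/libbpf build flow and is not specific to this driver.

/*
 * Generic XDP example (not part of the patch): drop frames that are
 * too short to carry an Ethernet header, pass everything else.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_runts(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames too short to carry an Ethernet header */
	if (data + sizeof(struct ethhdr) > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";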
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index cc5e48e1bb4c..f58c506bda22 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -9,5 +9,17 @@ config SPARX5_SWITCH
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
+ select VCAP
help
This driver supports the Sparx5 network switch device.
+
+config SPARX5_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on SPARX5_SWITCH && DCB
+ default y
+ help
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver. This can be used to assign priority to traffic based on
+ DSCP and PCP.
+
+ If unsure, say Y.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index d1c6ad966747..d0ed7090aa54 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -5,7 +5,14 @@
obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
-sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
+sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
- sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o
+ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
+ sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o sparx5_tc_matchall.o
+
+sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
+sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
+
+# Provide include files
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
new file mode 100644
index 000000000000..74abb946b2a3
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/dcbnl.h>
+
+#include "sparx5_port.h"
+
+enum sparx5_dcb_apptrust_values {
+ SPARX5_DCB_APPTRUST_EMPTY,
+ SPARX5_DCB_APPTRUST_DSCP,
+ SPARX5_DCB_APPTRUST_PCP,
+ SPARX5_DCB_APPTRUST_DSCP_PCP,
+ __SPARX5_DCB_APPTRUST_MAX
+};
+
+static const struct sparx5_dcb_apptrust {
+ u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1];
+ int nselectors;
+} *sparx5_port_apptrust[SPX5_PORTS];
+
+static const char *sparx5_dcb_apptrust_names[__SPARX5_DCB_APPTRUST_MAX] = {
+ [SPARX5_DCB_APPTRUST_EMPTY] = "empty",
+ [SPARX5_DCB_APPTRUST_DSCP] = "dscp",
+ [SPARX5_DCB_APPTRUST_PCP] = "pcp",
+ [SPARX5_DCB_APPTRUST_DSCP_PCP] = "dscp pcp"
+};
+
+/* Sparx5 supported apptrust policies */
+static const struct sparx5_dcb_apptrust
+ sparx5_dcb_apptrust_policies[__SPARX5_DCB_APPTRUST_MAX] = {
+ /* Empty *must* be first */
+ [SPARX5_DCB_APPTRUST_EMPTY] = { { 0 }, 0 },
+ [SPARX5_DCB_APPTRUST_DSCP] = { { IEEE_8021QAZ_APP_SEL_DSCP }, 1 },
+ [SPARX5_DCB_APPTRUST_PCP] = { { DCB_APP_SEL_PCP }, 1 },
+ [SPARX5_DCB_APPTRUST_DSCP_PCP] = { { IEEE_8021QAZ_APP_SEL_DSCP,
+ DCB_APP_SEL_PCP }, 2 },
+};
+
+/* Validate app entry.
+ *
+ * Check for valid selectors and valid protocol and priority ranges.
+ */
+static int sparx5_dcb_app_validate(struct net_device *dev,
+ const struct dcb_app *app)
+{
+ int err = 0;
+
+ switch (app->selector) {
+ /* Default priority checks */
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol != 0)
+ err = -EINVAL;
+ else if (app->priority >= SPX5_PRIOS)
+ err = -ERANGE;
+ break;
+ /* Dscp checks */
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= SPARX5_PORT_QOS_DSCP_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= SPX5_PRIOS)
+ err = -ERANGE;
+ break;
+ /* Pcp checks */
+ case DCB_APP_SEL_PCP:
+ if (app->protocol >= SPARX5_PORT_QOS_PCP_DEI_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= SPX5_PRIOS)
+ err = -ERANGE;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ netdev_err(dev, "Invalid entry: %d:%d\n", app->protocol,
+ app->priority);
+
+ return err;
+}
+
+/* Validate apptrust configuration.
+ *
+ * Return index of supported apptrust configuration if valid, otherwise return
+ * error.
+ */
+static int sparx5_dcb_apptrust_validate(struct net_device *dev, u8 *selectors,
+ int nselectors, int *err)
+{
+ bool match = false;
+ int i, ii;
+
+ for (i = 0; i < ARRAY_SIZE(sparx5_dcb_apptrust_policies); i++) {
+ if (sparx5_dcb_apptrust_policies[i].nselectors != nselectors)
+ continue;
+ match = true;
+ for (ii = 0; ii < nselectors; ii++) {
+ if (sparx5_dcb_apptrust_policies[i].selectors[ii] !=
+ *(selectors + ii)) {
+ match = false;
+ break;
+ }
+ }
+ if (match)
+ break;
+ }
+
+ /* Requested trust configuration is not supported */
+ if (!match) {
+ netdev_err(dev, "Valid apptrust configurations are:\n");
+ for (i = 0; i < ARRAY_SIZE(sparx5_dcb_apptrust_names); i++)
+ pr_info("order: %s\n", sparx5_dcb_apptrust_names[i]);
+ *err = -EOPNOTSUPP;
+ }
+
+ return i;
+}
+
+static bool sparx5_dcb_apptrust_contains(int portno, u8 selector)
+{
+ const struct sparx5_dcb_apptrust *conf = sparx5_port_apptrust[portno];
+ int i;
+
+ for (i = 0; i < conf->nselectors; i++)
+ if (conf->selectors[i] == selector)
+ return true;
+
+ return false;
+}
+
+static int sparx5_dcb_app_update(struct net_device *dev)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5_port_qos_dscp_map *dscp_map;
+ struct sparx5_port_qos_pcp_map *pcp_map;
+ struct sparx5_port_qos qos = {0};
+ struct dcb_app app_itr = {0};
+ int portno = port->portno;
+ int i;
+
+ dscp_map = &qos.dscp.map;
+ pcp_map = &qos.pcp.map;
+
+ /* Get default prio. */
+ qos.default_prio = dcb_ieee_getapp_default_prio_mask(dev);
+ if (qos.default_prio)
+ qos.default_prio = fls(qos.default_prio) - 1;
+
+ /* Get dscp ingress mapping */
+ for (i = 0; i < ARRAY_SIZE(dscp_map->map); i++) {
+ app_itr.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ app_itr.protocol = i;
+ dscp_map->map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Get pcp ingress mapping */
+ for (i = 0; i < ARRAY_SIZE(pcp_map->map); i++) {
+ app_itr.selector = DCB_APP_SEL_PCP;
+ app_itr.protocol = i;
+ pcp_map->map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Enable use of PCP for queue classification? */
+ if (sparx5_dcb_apptrust_contains(portno, DCB_APP_SEL_PCP)) {
+ qos.pcp.qos_enable = true;
+ qos.pcp.dp_enable = qos.pcp.qos_enable;
+ }
+
+ /* Enable use of DSCP for queue classification? */
+ if (sparx5_dcb_apptrust_contains(portno, IEEE_8021QAZ_APP_SEL_DSCP)) {
+ qos.dscp.qos_enable = true;
+ qos.dscp.dp_enable = qos.dscp.qos_enable;
+ }
+
+ return sparx5_port_qos_set(port, &qos);
+}
+
+/* Set or delete dscp app entry.
+ *
+ * Dscp mapping is global for all ports, so set and delete app entries are
+ * replicated for each port.
+ */
+static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev,
+ struct dcb_app *app, bool del)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct dcb_app apps[SPX5_PORTS];
+ struct sparx5_port *port_itr;
+ int err, i;
+
+ for (i = 0; i < SPX5_PORTS; i++) {
+ port_itr = port->sparx5->ports[i];
+ if (!port_itr)
+ continue;
+ memcpy(&apps[i], app, sizeof(struct dcb_app));
+ if (del)
+ err = dcb_ieee_delapp(port_itr->ndev, &apps[i]);
+ else
+ err = dcb_ieee_setapp(port_itr->ndev, &apps[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ int err = 0;
+ u8 prio;
+
+ err = sparx5_dcb_app_validate(dev, app);
+ if (err)
+ goto out;
+
+ /* Delete current mapping, if it exists */
+ prio = dcb_getapp(dev, app);
+ if (prio) {
+ app_itr = *app;
+ app_itr.priority = prio;
+ dcb_ieee_delapp(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, false);
+ else
+ err = dcb_ieee_setapp(dev, app);
+
+ if (err)
+ goto out;
+
+ sparx5_dcb_app_update(dev);
+
+out:
+ return err;
+}
+
+static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, true);
+ else
+ err = dcb_ieee_delapp(dev, app);
+
+ if (err < 0)
+ return err;
+
+ return sparx5_dcb_app_update(dev);
+}
+
+static int sparx5_dcb_setapptrust(struct net_device *dev, u8 *selectors,
+ int nselectors)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ int err = 0, idx;
+
+ idx = sparx5_dcb_apptrust_validate(dev, selectors, nselectors, &err);
+ if (err < 0)
+ return err;
+
+ sparx5_port_apptrust[port->portno] = &sparx5_dcb_apptrust_policies[idx];
+
+ return sparx5_dcb_app_update(dev);
+}
+
+static int sparx5_dcb_getapptrust(struct net_device *dev, u8 *selectors,
+ int *nselectors)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ const struct sparx5_dcb_apptrust *trust;
+
+ trust = sparx5_port_apptrust[port->portno];
+
+ memcpy(selectors, trust->selectors, trust->nselectors);
+ *nselectors = trust->nselectors;
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops sparx5_dcbnl_ops = {
+ .ieee_setapp = sparx5_dcb_ieee_setapp,
+ .ieee_delapp = sparx5_dcb_ieee_delapp,
+ .dcbnl_setapptrust = sparx5_dcb_setapptrust,
+ .dcbnl_getapptrust = sparx5_dcb_getapptrust,
+};
+
+int sparx5_dcb_init(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int i;
+
+ for (i = 0; i < SPX5_PORTS; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+ port->ndev->dcbnl_ops = &sparx5_dcbnl_ops;
+ /* Initialize [dscp, pcp] default trust */
+ sparx5_port_apptrust[port->portno] =
+ &sparx5_dcb_apptrust_policies
+ [SPARX5_DCB_APPTRUST_DSCP_PCP];
+ }
+
+ return 0;
+}
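sparx5_dcb_ieee_setapp() above consumes ordinary dcbnl APP table entries, so a DSCP-to-priority mapping installed from user space arrives as a struct dcb_app carrying the DSCP selector. The small sketch below is illustrative only and assumes nothing beyond the uapi <linux/dcbnl.h> layout; the netlink plumbing that actually delivers such an entry is not shown.

/*
 * Illustrative sketch (not part of the patch): the APP table entry a
 * "DSCP 46 -> priority 5" mapping would correspond to.
 */
#include <stdio.h>
#include <linux/dcbnl.h>

int main(void)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = 46,		/* DSCP value (EF) */
		.priority = 5,		/* internal priority / queue */
	};

	printf("selector=%u protocol=%u priority=%u\n",
	       app.selector, app.protocol, app.priority);
	return 0;
}

Whether such a mapping takes effect on ingress then depends on the per-port apptrust selection handled by sparx5_dcb_setapptrust() above.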
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index b6bbb3c9bd7a..d25f4f09faa0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -675,6 +675,14 @@ static int sparx5_start(struct sparx5 *sparx5)
sparx5_board_init(sparx5);
err = sparx5_register_notifier_blocks(sparx5);
+ if (err)
+ return err;
+
+ err = sparx5_vcap_init(sparx5);
+ if (err) {
+ sparx5_unregister_notifier_blocks(sparx5);
+ return err;
+ }
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
@@ -755,6 +763,8 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
/* Default values, some from DT */
sparx5->coreclock = SPX5_CORE_CLOCK_DEFAULT;
+ sparx5->debugfs_root = debugfs_create_dir("sparx5", NULL);
+
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(sparx5->dev, "no ethernet-ports child node found\n");
@@ -900,6 +910,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+ debugfs_remove_recursive(sparx5->debugfs_root);
if (sparx5->xtr_irq) {
disable_irq(sparx5->xtr_irq);
sparx5->xtr_irq = -ENXIO;
@@ -911,6 +922,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
sparx5_ptp_deinit(sparx5);
sparx5_fdma_stop(sparx5);
sparx5_cleanup_ports(sparx5);
+ sparx5_vcap_destroy(sparx5);
/* Unregister netdevs */
sparx5_unregister_notifier_blocks(sparx5);
destroy_workqueue(sparx5->mact_queue);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 7a83222caa73..4a574cdcb584 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -17,6 +17,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
+#include <linux/debugfs.h>
#include "sparx5_main_regs.h"
@@ -288,8 +289,12 @@ struct sparx5 {
struct mutex ptp_lock; /* lock for ptp interface state */
u16 ptp_skbs;
int ptp_irq;
+ /* VCAP */
+ struct vcap_control *vcap_ctrl;
/* PGID allocation map */
u8 pgid_map[PGID_TABLE_SIZE];
+ /* Common root for debugfs */
+ struct dentry *debugfs_root;
};
/* sparx5_switchdev.c */
@@ -357,6 +362,16 @@ int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
int sparx_stats_init(struct sparx5 *sparx5);
+/* sparx5_dcb.c */
+#ifdef CONFIG_SPARX5_DCB
+int sparx5_dcb_init(struct sparx5 *sparx5);
+#else
+static inline int sparx5_dcb_init(struct sparx5 *sparx5)
+{
+ return 0;
+}
+#endif
+
/* sparx5_netdev.c */
void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
@@ -382,6 +397,10 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+/* sparx5_vcap_impl.c */
+int sparx5_vcap_init(struct sparx5 *sparx5);
+void sparx5_vcap_destroy(struct sparx5 *sparx5);
+
/* sparx5_pgid.c */
enum sparx5_pgid_type {
SPX5_PGID_FREE,
@@ -418,6 +437,7 @@ static inline bool sparx5_is_baser(phy_interface_t interface)
extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
extern const struct ethtool_ops sparx5_ethtool_ops;
+extern const struct dcbnl_rtnl_ops sparx5_dcbnl_ops;
/* Calculate raw offset */
static inline __pure int spx5_offset(int id, int tinst, int tcnt,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index fa2eb70f487a..6c93dd6b01b0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -4,8 +4,8 @@
* Copyright (c) 2021 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100.
- * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty)
+/* This file is autogenerated by cml-utils 2022-09-28 11:17:02 +0200.
+ * Commit ID: 385c8a11d71a9f6a60368d3a3cb648fa257b479a
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -171,6 +171,162 @@ enum sparx5_target {
/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+/* ANA_ACL:COMMON:VCAP_S2_CFG */
+#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA GENMASK(27, 26)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_OAM_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA GENMASK(25, 24)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_OTHER_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA GENMASK(23, 22)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_VID_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA GENMASK(21, 20)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_STD_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA GENMASK(19, 18)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP6_TCPUDP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA GENMASK(17, 16)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP_7TUPLE_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA GENMASK(15, 14)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_VID_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA GENMASK(13, 12)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_TCPUDP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA GENMASK(11, 10)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_IP4_OTHER_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA GENMASK(9, 8)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_ARP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA GENMASK(7, 6)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_SNAP_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA GENMASK(5, 4)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_TYPE_MAC_LLC_ENA, x)
+
+#define ANA_ACL_VCAP_S2_CFG_SEC_ENA GENMASK(3, 0)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
+#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
+
+/* ANA_ACL:COMMON:SWAP_IP_CTRL */
+#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
+
+#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18)
+#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL, x)
+#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL GENMASK(17, 10)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_VAL, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL GENMASK(9, 2)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_VAL, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA BIT(1)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP6_HOPC_ENA, x)
+
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA BIT(0)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
+#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
+
+/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
+#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
+
+#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK, x)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK, x)
+
+#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK GENMASK(5, 0)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
+#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
+
+/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
+
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN, x)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN, x)
+
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS BIT(4)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS, x)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_DIS, x)
+
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES GENMASK(3, 0)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
+
/* ANA_ACL:COMMON:OWN_UPSID */
#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
@@ -180,6 +336,174 @@ enum sparx5_target {
#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
+#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL, 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13)
+#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL BIT(12)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IGR_PORT_MASK_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL GENMASK(11, 10)
+#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL GENMASK(9, 8)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL GENMASK(7, 6)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL GENMASK(5, 3)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL GENMASK(2, 1)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL, x)
+
+#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL BIT(0)
+#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
+#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
+
+/* ANA_ACL:CNT_A:CNT_A */
+#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL, 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
+
+/* ANA_ACL:CNT_B:CNT_B */
+#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL, 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
+
+/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
+#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL, 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY BIT(16)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY BIT(15)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY BIT(14)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY BIT(13)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY BIT(12)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY BIT(11)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY BIT(10)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY BIT(9)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY BIT(8)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY BIT(6)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY BIT(5)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY BIT(4)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY BIT(3)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY BIT(2)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY BIT(1)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY, x)
+
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY BIT(0)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_SET(x)\
+ FIELD_PREP(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
+ FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+
/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
@@ -426,6 +750,96 @@ enum sparx5_target {
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
+#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
+
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL GENMASK(4, 3)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, x)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_GET(x)\
+ FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, x)
+
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL GENMASK(2, 0)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
+#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_GET(x)\
+ FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
+
+/* ANA_CL:PORT:QOS_CFG */
+#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
+
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA BIT(17)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_COSID_ENA, x)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_COSID_ENA, x)
+
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_VAL GENMASK(16, 14)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_COSID_VAL, x)
+#define ANA_CL_QOS_CFG_DEFAULT_COSID_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_COSID_VAL, x)
+
+#define ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL GENMASK(13, 12)
+#define ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, x)
+#define ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, x)
+
+#define ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA BIT(11)
+#define ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_TRANSLATE_ENA, x)
+
+#define ANA_CL_QOS_CFG_DSCP_KEEP_ENA BIT(10)
+#define ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_KEEP_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_KEEP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_KEEP_ENA, x)
+
+#define ANA_CL_QOS_CFG_KEEP_ENA BIT(9)
+#define ANA_CL_QOS_CFG_KEEP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_KEEP_ENA, x)
+#define ANA_CL_QOS_CFG_KEEP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_KEEP_ENA, x)
+
+#define ANA_CL_QOS_CFG_PCP_DEI_DP_ENA BIT(8)
+#define ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_PCP_DEI_DP_ENA, x)
+#define ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_PCP_DEI_DP_ENA, x)
+
+#define ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA BIT(7)
+#define ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA, x)
+#define ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA, x)
+
+#define ANA_CL_QOS_CFG_DSCP_DP_ENA BIT(6)
+#define ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_DP_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_DP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_DP_ENA, x)
+
+#define ANA_CL_QOS_CFG_DSCP_QOS_ENA BIT(5)
+#define ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DSCP_QOS_ENA, x)
+#define ANA_CL_QOS_CFG_DSCP_QOS_ENA_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DSCP_QOS_ENA, x)
+
+#define ANA_CL_QOS_CFG_DEFAULT_DP_VAL GENMASK(4, 3)
+#define ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_DP_VAL, x)
+#define ANA_CL_QOS_CFG_DEFAULT_DP_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_DP_VAL, x)
+
+#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL GENMASK(2, 0)
+#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
+#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
+
/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
@@ -438,6 +852,39 @@ enum sparx5_target {
#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+/* ANA_CL:COMMON:DSCP_CFG */
+#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
+
+#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL GENMASK(12, 7)
+#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL, x)
+#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_QOS_VAL GENMASK(6, 4)
+#define ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_QOS_VAL, x)
+#define ANA_CL_DSCP_CFG_DSCP_QOS_VAL_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_QOS_VAL, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_DP_VAL GENMASK(3, 2)
+#define ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_DP_VAL, x)
+#define ANA_CL_DSCP_CFG_DSCP_DP_VAL_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_DP_VAL, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_REWR_ENA BIT(1)
+#define ANA_CL_DSCP_CFG_DSCP_REWR_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_REWR_ENA, x)
+#define ANA_CL_DSCP_CFG_DSCP_REWR_ENA_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_REWR_ENA, x)
+
+#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA BIT(0)
+#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
+#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
+ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
+
/* ANA_L2:COMMON:AUTO_LRN_CFG */
#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
@@ -5039,6 +5486,138 @@ enum sparx5_target {
#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_CMD, x)
+#define VCAP_SUPER_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_CMD, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_SUPER_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_SUPER_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_ADDR, x)
+#define VCAP_SUPER_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_SUPER_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_SUPER_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_UPDATE_SHOT, x)
+#define VCAP_SUPER_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_SUPER_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_SUPER_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_CLEAR_CACHE, x)
+#define VCAP_SUPER_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CFG_MV_NUM_POS, x)
+#define VCAP_SUPER_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_SUPER_CFG_MV_NUM_POS, x)
+
+#define VCAP_SUPER_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_SUPER_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_SUPER_CFG_MV_SIZE, x)
+#define VCAP_SUPER_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_SUPER_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_SUPER_IDX_CORE_IDX, x)
+#define VCAP_SUPER_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x)
+
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_SUPER_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_SUPER_MAP_CORE_MAP, x)
+#define VCAP_SUPER_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x)
+
+/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
+#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
+#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
+#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
+#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_SUPER:VCAP_CONST:IF_CNT */
+#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index 830da0e5ff27..bb97d27a1da4 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -138,7 +138,6 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
};
const struct phylink_mac_ops sparx5_phylink_mac_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = sparx5_phylink_mac_select_pcs,
.mac_config = sparx5_phylink_mac_config,
.mac_link_down = sparx5_phylink_mac_link_down,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 32709d21ab2f..107b9cd931c0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/phy/phy.h>
+#include <net/dcbnl.h>
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
@@ -1144,3 +1145,101 @@ void sparx5_port_enable(struct sparx5_port *port, bool enable)
sparx5,
QFWD_SWITCH_PORT_MODE(port->portno));
}
+
+int sparx5_port_qos_set(struct sparx5_port *port,
+ struct sparx5_port_qos *qos)
+{
+ sparx5_port_qos_dscp_set(port, &qos->dscp);
+ sparx5_port_qos_pcp_set(port, &qos->pcp);
+ sparx5_port_qos_default_set(port, qos);
+
+ return 0;
+}
+
+int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 *pcp_itr = qos->map.map;
+ u8 pcp, dp;
+ int i;
+
+ /* Enable/disable pcp and dp for qos classification. */
+ spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
+ ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
+ ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
+ sparx5, ANA_CL_QOS_CFG(port->portno));
+
+ /* Map each pcp and dei value to priority and dp */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ pcp = *(pcp_itr + i);
+ dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
+ spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
+ ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
+ ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
+ ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
+ ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
+ }
+
+ return 0;
+}
+
+int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 *dscp = qos->map.map;
+ int i;
+
+ /* Enable/disable dscp and dp for qos classification.
+ * Disable rewrite of dscp values for now.
+ */
+ spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
+ ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
+ ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
+ ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
+ ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
+ ANA_CL_QOS_CFG(port->portno));
+
+ /* Map each dscp value to priority and dp */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
+ ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
+ ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
+ ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
+ ANA_CL_DSCP_CFG(i));
+ }
+
+ /* Set per-dscp trust */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ if (qos->qos_enable) {
+ spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
+ ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
+ ANA_CL_DSCP_CFG(i));
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_port_qos_default_set(const struct sparx5_port *port,
+ const struct sparx5_port_qos *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ /* Set default prio and dp level */
+ spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
+ ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
+ ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
+ ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
+ sparx5, ANA_CL_QOS_CFG(port->portno));
+
+ /* Set default pcp and dei for untagged frames */
+ spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
+ ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
+ ANA_CL_VLAN_CTRL_PORT_PCP |
+ ANA_CL_VLAN_CTRL_PORT_DEI,
+ sparx5, ANA_CL_VLAN_CTRL(port->portno));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 2f8043eac71b..fbafe22e25cc 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -91,4 +91,46 @@ int sparx5_get_port_status(struct sparx5 *sparx5,
void sparx5_port_enable(struct sparx5_port *port, bool enable);
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed);
+#define SPARX5_PORT_QOS_PCP_COUNT 8
+#define SPARX5_PORT_QOS_DEI_COUNT 8
+#define SPARX5_PORT_QOS_PCP_DEI_COUNT \
+ (SPARX5_PORT_QOS_PCP_COUNT + SPARX5_PORT_QOS_DEI_COUNT)
+struct sparx5_port_qos_pcp_map {
+ u8 map[SPARX5_PORT_QOS_PCP_DEI_COUNT];
+};
+
+#define SPARX5_PORT_QOS_DSCP_COUNT 64
+struct sparx5_port_qos_dscp_map {
+ u8 map[SPARX5_PORT_QOS_DSCP_COUNT];
+};
+
+struct sparx5_port_qos_pcp {
+ struct sparx5_port_qos_pcp_map map;
+ bool qos_enable;
+ bool dp_enable;
+};
+
+struct sparx5_port_qos_dscp {
+ struct sparx5_port_qos_dscp_map map;
+ bool qos_enable;
+ bool dp_enable;
+};
+
+struct sparx5_port_qos {
+ struct sparx5_port_qos_pcp pcp;
+ struct sparx5_port_qos_dscp dscp;
+ u8 default_prio;
+};
+
+int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
+
+int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp *qos);
+
+int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp *qos);
+
+int sparx5_port_qos_default_set(const struct sparx5_port *port,
+ const struct sparx5_port_qos *qos);
+
#endif /* __SPARX5_PORT_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
index 1e79d0ef0cb8..379e540e5e6a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -389,6 +389,10 @@ int sparx5_qos_init(struct sparx5 *sparx5)
if (ret < 0)
return ret;
+ ret = sparx5_dcb_init(sparx5);
+ if (ret < 0)
+ return ret;
+
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index dc2c3756e3a2..205246b5af82 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -10,6 +10,55 @@
#include "sparx5_main.h"
#include "sparx5_qos.h"
+/* tc block handling */
+static LIST_HEAD(sparx5_block_cb_list);
+
+static int sparx5_tc_block_cb(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct net_device *ndev = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return sparx5_tc_matchall(ndev, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return sparx5_tc_flower(ndev, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sparx5_tc_block_cb_ingress(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return sparx5_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int sparx5_tc_block_cb_egress(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return sparx5_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int sparx5_tc_setup_block(struct net_device *ndev,
+ struct flow_block_offload *fbo)
+{
+ flow_setup_cb_t *cb;
+
+ if (fbo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ cb = sparx5_tc_block_cb_ingress;
+ else if (fbo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ cb = sparx5_tc_block_cb_egress;
+ else
+ return -EOPNOTSUPP;
+
+ return flow_block_cb_setup_simple(fbo, &sparx5_block_cb_list,
+ cb, ndev, ndev, false);
+}
+
static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
u32 *idx)
{
@@ -108,6 +157,8 @@ int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
+ case TC_SETUP_BLOCK:
+ return sparx5_tc_setup_block(ndev, type_data);
case TC_SETUP_QDISC_MQPRIO:
return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
case TC_SETUP_QDISC_TBF:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
index 5b55e11b77e1..adab88e6b21f 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
@@ -7,9 +7,28 @@
#ifndef __SPARX5_TC_H__
#define __SPARX5_TC_H__
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
#include <linux/netdevice.h>
+/* Controls how PORT_MASK is applied */
+enum SPX5_PORT_MASK_MODE {
+ SPX5_PMM_OR_DSTMASK,
+ SPX5_PMM_AND_VLANMASK,
+ SPX5_PMM_REPLACE_PGID,
+ SPX5_PMM_REPLACE_ALL,
+ SPX5_PMM_REDIR_PGID,
+ SPX5_PMM_OR_PGID_MASK,
+};
+
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
+int sparx5_tc_matchall(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress);
+
+int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
+ bool ingress);
+
#endif /* __SPARX5_TC_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
new file mode 100644
index 000000000000..1ed304a816cc
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/tcp.h>
+
+#include "sparx5_tc.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+
+#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */
+
+/* Collect keysets and type ids for multiple rules per size */
+struct sparx5_wildcard_rule {
+ bool selected;
+ u8 value;
+ u8 mask;
+ enum vcap_keyfield_set keyset;
+};
+
+struct sparx5_multiple_rules {
+ struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
+};
+
+struct sparx5_tc_flower_parse_usage {
+ struct flow_cls_offload *fco;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ u16 l3_proto;
+ u8 l4_proto;
+ unsigned int used_keys;
+};
+
+struct sparx5_tc_rule_pkt_cnt {
+ u64 cookie;
+ u32 pkts;
+};
+
+/* These protocols have dedicated keysets in IS2 and a TC dissector,
+ * except ETH_P_ARP, which has no TC dissector
+ */
+static u16 sparx5_tc_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+enum sparx5_is2_arp_opcode {
+ SPX5_IS2_ARP_REQUEST,
+ SPX5_IS2_ARP_REPLY,
+ SPX5_IS2_RARP_REQUEST,
+ SPX5_IS2_RARP_REPLY,
+};
+
+enum tc_arp_opcode {
+ TC_ARP_OP_RESERVED,
+ TC_ARP_OP_REQUEST,
+ TC_ARP_OP_REPLY,
+};
+
+static bool sparx5_tc_is_known_etype(u16 etype)
+{
+ int idx;
+
+ /* For now this only knows about IS2 traffic classification */
+ for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
+ if (sparx5_tc_known_etypes[idx] == etype)
+ return true;
+
+ return false;
+}
+
+static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
+ enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
+ struct flow_match_eth_addrs match;
+ struct vcap_u48_key smac, dmac;
+ int err = 0;
+
+ flow_rule_match_eth_addrs(st->frule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
+ vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ if (err)
+ goto out;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
+ vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IP) {
+ struct flow_match_ipv4_addrs mt;
+
+ flow_rule_match_ipv4_addrs(st->frule, &mt);
+ if (mt.mask->src) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_SIP,
+ be32_to_cpu(mt.key->src),
+ be32_to_cpu(mt.mask->src));
+ if (err)
+ goto out;
+ }
+ if (mt.mask->dst) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_DIP,
+ be32_to_cpu(mt.key->dst),
+ be32_to_cpu(mt.mask->dst));
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IPV6) {
+ struct flow_match_ipv6_addrs mt;
+ struct vcap_u128_key sip;
+ struct vcap_u128_key dip;
+
+ flow_rule_match_ipv6_addrs(st->frule, &mt);
+ /* Check if address masks are non-zero */
+ if (!ipv6_addr_any(&mt.mask->src)) {
+ vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
+ vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_SIP, &sip);
+ if (err)
+ goto out;
+ }
+ if (!ipv6_addr_any(&mt.mask->dst)) {
+ vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
+ vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_DIP, &dip);
+ if (err)
+ goto out;
+ }
+ }
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_control mt;
+ u32 value, mask;
+ int err = 0;
+
+ flow_rule_match_control(st->frule, &mt);
+
+ if (mt.mask->flags) {
+ if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
+ value = 1; /* initial fragment */
+ mask = 0x3;
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_ports mt;
+ u16 value, mask;
+ int err = 0;
+
+ flow_rule_match_ports(st->frule, &mt);
+
+ if (mt.mask->src) {
+ value = be16_to_cpu(mt.key->src);
+ mask = be16_to_cpu(mt.mask->src);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->dst) {
+ value = be16_to_cpu(mt.key->dst);
+ mask = be16_to_cpu(mt.mask->dst);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic mt;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &mt);
+
+ if (mt.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(mt.key->n_proto);
+ if (!sparx5_tc_is_known_etype(st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IPV6) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ }
+ }
+
+ if (mt.mask->ip_proto) {
+ st->l4_proto = mt.key->ip_proto;
+ if (st->l4_proto == IPPROTO_TCP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+ struct flow_match_vlan mt;
+ int err;
+
+ flow_rule_match_vlan(st->frule, &mt);
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_tcp mt;
+ u16 tcp_flags_mask;
+ u16 tcp_flags_key;
+ enum vcap_bit val;
+ int err = 0;
+
+ flow_rule_match_tcp(st->frule, &mt);
+ tcp_flags_key = be16_to_cpu(mt.key->flags);
+ tcp_flags_mask = be16_to_cpu(mt.mask->flags);
+
+ if (tcp_flags_mask & TCPHDR_FIN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_FIN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_SYN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_SYN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_RST) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_RST)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_PSH) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_PSH)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_ACK) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_ACK)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_URG) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_URG)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_arp mt;
+ u16 value, mask;
+ u32 ipval, ipmsk;
+ int err;
+
+ flow_rule_match_arp(st->frule, &mt);
+
+ if (mt.mask->op) {
+ mask = 0x3;
+ if (st->l3_proto == ETH_P_ARP) {
+ value = mt.key->op == TC_ARP_OP_REQUEST ?
+ SPX5_IS2_ARP_REQUEST :
+ SPX5_IS2_ARP_REPLY;
+ } else { /* RARP */
+ value = mt.key->op == TC_ARP_OP_REQUEST ?
+ SPX5_IS2_RARP_REQUEST :
+ SPX5_IS2_RARP_REPLY;
+ }
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ /* The IS2 ARP keyset does not support ARP hardware addresses */
+ if (!is_zero_ether_addr(mt.mask->sha) ||
+ !is_zero_ether_addr(mt.mask->tha)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mt.mask->sip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->sip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->tip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->tip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
+
+ return 0;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_ip mt;
+ int err = 0;
+
+ flow_rule_match_ip(st->frule, &mt);
+
+ if (mt.mask->tos) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
+ mt.key->tos,
+ mt.mask->tos);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
+ return err;
+}
+
+static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
+};
+
+static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ u16 *l3_proto)
+{
+ struct sparx5_tc_flower_parse_usage state = {
+ .fco = fco,
+ .vrule = vrule,
+ .l3_proto = ETH_P_ALL,
+ };
+ int idx, err = 0;
+
+ state.frule = flow_cls_offload_flow_rule(fco);
+ for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
+ if (!flow_rule_match_key(state.frule, idx))
+ continue;
+ if (!sparx5_tc_flower_usage_handlers[idx])
+ continue;
+ err = sparx5_tc_flower_usage_handlers[idx](&state);
+ if (err)
+ return err;
+ }
+
+ if (state.frule->match.dissector->used_keys ^ state.used_keys) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Unsupported match item");
+ return -ENOENT;
+ }
+
+ if (l3_proto)
+ *l3_proto = state.l3_proto;
+ return err;
+}
+
+static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
+ struct flow_action_entry *actent, *last_actent = NULL;
+ struct flow_action *act = &rule->action;
+ u64 action_mask = 0;
+ int idx;
+
+ if (!flow_action_has_entries(act)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
+ return -EINVAL;
+ }
+
+ if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(idx, actent, act) {
+ if (action_mask & BIT(actent->id)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "More actions of the same type");
+ return -EINVAL;
+ }
+ action_mask |= BIT(actent->id);
+ last_actent = actent; /* Save last action for later check */
+ }
+
+ /* Check that last action is a goto */
+ if (last_actent->id != FLOW_ACTION_GOTO) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Last action must be 'goto'");
+ return -EINVAL;
+ }
+
+ /* Check if the goto chain is in the next lookup */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+
+ /* Catch unsupported combinations of actions */
+ if (action_mask & BIT(FLOW_ACTION_TRAP) &&
+ action_mask & BIT(FLOW_ACTION_ACCEPT)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine pass and trap action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* Add a rule counter action - only IS2 is considered for now */
+static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ int err;
+
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
+ if (err)
+ return err;
+
+ vcap_rule_set_counter_id(vrule, vrule->id);
+ return err;
+}
+
+/* Collect all port keysets and apply the first of them, possibly wildcarded */
+static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
+ struct vcap_rule *vrule,
+ struct vcap_admin *admin,
+ u16 l3_proto,
+ struct sparx5_multiple_rules *multi)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_keyset_list portkeysetlist = {};
+ enum vcap_keyfield_set portkeysets[10] = {};
+ struct vcap_keyset_list matches = {};
+ enum vcap_keyfield_set keysets[10];
+ int idx, jdx, err = 0, count = 0;
+ struct sparx5_wildcard_rule *mru;
+ const struct vcap_set *kinfo;
+ struct vcap_control *vctrl;
+
+ vctrl = port->sparx5->vcap_ctrl;
+
+ /* Find the keysets that the rule can use */
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+ if (vcap_rule_find_keysets(vrule, &matches) == 0)
+ return -EINVAL;
+
+ /* Find the keysets that the port configuration supports */
+ portkeysetlist.max = ARRAY_SIZE(portkeysets);
+ portkeysetlist.keysets = portkeysets;
+ err = sparx5_vcap_get_port_keyset(ndev,
+ admin, vrule->vcap_chain_id,
+ l3_proto,
+ &portkeysetlist);
+ if (err)
+ return err;
+
+ /* Find the intersection of the two sets of keysets */
+ for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
+ kinfo = vcap_keyfieldset(vctrl, admin->vtype,
+ portkeysetlist.keysets[idx]);
+ if (!kinfo)
+ continue;
+
+ /* Find a port keyset that matches the required keys.
+ * If there are multiple keysets, compose a type id mask
+ */
+ for (jdx = 0; jdx < matches.cnt; ++jdx) {
+ if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
+ continue;
+
+ mru = &multi->rule[kinfo->sw_per_item];
+ if (!mru->selected) {
+ mru->selected = true;
+ mru->keyset = portkeysetlist.keysets[idx];
+ mru->value = kinfo->type_id;
+ }
+ mru->value &= kinfo->type_id;
+ mru->mask |= kinfo->type_id;
+ ++count;
+ }
+ }
+ if (count == 0)
+ return -EPROTO;
+
+ if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
+ return -ENOENT;
+
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ mru = &multi->rule[idx];
+ if (!mru->selected)
+ continue;
+
+ /* Align the mask to the combined value */
+ mru->mask ^= mru->value;
+ }
+
+ /* Set the chosen keyset on the rule and set a wildcarded type if there
+ * is more than one keyset
+ */
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ mru = &multi->rule[idx];
+ if (!mru->selected)
+ continue;
+
+ vcap_set_rule_set_keyset(vrule, mru->keyset);
+ if (count > 1)
+ /* Some keysets do not have a type field */
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
+ mru->value,
+ ~mru->mask);
+ mru->selected = false; /* mark as done */
+ break; /* Stop here and add more rules later */
+ }
+ return err;
+}
+
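+/* Add a copy of the original rule using one of the other matching port
+ * keysets, linked to the original rule by the filter cookie
+ */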
+static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_rule *erule,
+ struct vcap_admin *admin,
+ struct sparx5_wildcard_rule *rule)
+{
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_TYPE,
+ };
+ struct vcap_rule *vrule;
+ int err;
+
+ /* Add an extra rule with a special user and the new keyset */
+ erule->user = VCAP_USER_TC_EXTRA;
+ vrule = vcap_copy_rule(erule);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ /* Link the new rule to the existing rule with the cookie */
+ vrule->cookie = erule->cookie;
+ vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
+ err = vcap_set_rule_set_keyset(vrule, rule->keyset);
+ if (err) {
+ pr_err("%s:%d: could not set keyset %s in rule: %u\n",
+ __func__, __LINE__,
+ vcap_keyset_name(vctrl, rule->keyset),
+ vrule->id);
+ goto out;
+ }
+
+ /* Some keysets do not have a type field, so ignore return value */
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);
+
+ err = vcap_set_rule_set_actionset(vrule, erule->actionset);
+ if (err)
+ goto out;
+
+ err = sparx5_tc_add_rule_counter(admin, vrule);
+ if (err)
+ goto out;
+
+ err = vcap_val_rule(vrule, ETH_P_ALL);
+ if (err) {
+ pr_err("%s:%d: could not validate rule: %u\n",
+ __func__, __LINE__, vrule->id);
+ vcap_set_tc_exterr(fco, vrule);
+ goto out;
+ }
+ err = vcap_add_rule(vrule);
+ if (err) {
+ pr_err("%s:%d: could not add rule: %u\n",
+ __func__, __LINE__, vrule->id);
+ goto out;
+ }
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
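+/* Add rule copies for the remaining selected keysets */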
+static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_rule *erule,
+ struct vcap_admin *admin,
+ struct sparx5_multiple_rules *multi)
+{
+ int idx, err = 0;
+
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ if (!multi->rule[idx].selected)
+ continue;
+
+ err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
+ &multi->rule[idx]);
+ if (err)
+ break;
+ }
+ return err;
+}
+
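+/* Handle a flower filter replace request: check the actions, translate the
+ * dissector keys and actions to a VCAP rule, select a keyset supported by
+ * the port configuration, and add wildcarded rule copies for any remaining
+ * keysets when no specific L3 protocol is given
+ */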
+static int sparx5_tc_flower_replace(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_multiple_rules multi = {};
+ struct flow_action_entry *act;
+ struct vcap_control *vctrl;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ u16 l3_proto;
+ int err, idx;
+
+ vctrl = port->sparx5->vcap_ctrl;
+
+ err = sparx5_tc_flower_action_check(vctrl, fco, admin);
+ if (err)
+ return err;
+
+ vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
+ fco->common.prio, 0);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ vrule->cookie = fco->cookie;
+
+ l3_proto = ETH_P_ALL;
+ err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
+ if (err)
+ goto out;
+
+ err = sparx5_tc_add_rule_counter(admin, vrule);
+ if (err)
+ goto out;
+
+ frule = flow_cls_offload_flow_rule(fco);
+ flow_action_for_each(idx, act, &frule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_TRAP:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM, 0);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
+ if (err)
+ goto out;
+ /* For now the actionset is hardcoded */
+ err = vcap_set_rule_set_actionset(vrule,
+ VCAP_AFS_BASE_TYPE);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_ACCEPT:
+ /* For now the actionset is hardcoded */
+ err = vcap_set_rule_set_actionset(vrule,
+ VCAP_AFS_BASE_TYPE);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_GOTO:
+ /* Links between VCAPs will be added later */
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Unsupported TC action");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
+ &multi);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No matching port keyset for filter protocol and keys");
+ goto out;
+ }
+
+ /* Provide the L3 protocol to guide the keyset selection */
+ err = vcap_val_rule(vrule, l3_proto);
+ if (err) {
+ vcap_set_tc_exterr(fco, vrule);
+ goto out;
+ }
+ err = vcap_add_rule(vrule);
+ if (err)
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Could not add the filter");
+
+ if (l3_proto == ETH_P_ALL)
+ err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
+ &multi);
+
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
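+/* Delete all VCAP rules that were added for this flower filter cookie */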
+static int sparx5_tc_flower_destroy(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_control *vctrl;
+ int err = -ENOENT, rule_id;
+
+ vctrl = port->sparx5->vcap_ctrl;
+ while (true) {
+ rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
+ if (rule_id <= 0)
+ break;
+ err = vcap_del_rule(vctrl, ndev, rule_id);
+ if (err) {
+ pr_err("%s:%d: could not delete rule %d\n",
+ __func__, __LINE__, rule_id);
+ break;
+ }
+ }
+ return err;
+}
+
+/* Collect packet counts from all rules with the same cookie */
+static int sparx5_tc_rule_counter_cb(void *arg, struct vcap_rule *rule)
+{
+ struct sparx5_tc_rule_pkt_cnt *rinfo = arg;
+ struct vcap_counter counter;
+ int err = 0;
+
+ if (rule->cookie == rinfo->cookie) {
+ err = vcap_rule_get_counter(rule, &counter);
+ if (err)
+ return err;
+ rinfo->pkts += counter.value;
+ /* Reset the rule counter */
+ counter.value = 0;
+ vcap_rule_set_counter(rule, &counter);
+ }
+ return err;
+}
+
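+/* Report the accumulated packet count for all rules sharing the cookie */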
+static int sparx5_tc_flower_stats(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_tc_rule_pkt_cnt rinfo = {};
+ struct vcap_control *vctrl;
+ ulong lastused = 0;
+ u64 drops = 0;
+ u32 pkts = 0;
+ int err;
+
+ rinfo.cookie = fco->cookie;
+ vctrl = port->sparx5->vcap_ctrl;
+ err = vcap_rule_iter(vctrl, sparx5_tc_rule_counter_cb, &rinfo);
+ if (err)
+ return err;
+ pkts = rinfo.pkts;
+ flow_stats_update(&fco->stats, 0x0, pkts, drops, lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ return err;
+}
+
+int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_control *vctrl;
+ struct vcap_admin *admin;
+ int err = -EINVAL;
+
+ /* Get vcap instance from the chain id */
+ vctrl = port->sparx5->vcap_ctrl;
+ admin = vcap_find_admin(vctrl, fco->common.chain_index);
+ if (!admin) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
+ return err;
+ }
+
+ switch (fco->command) {
+ case FLOW_CLS_REPLACE:
+ return sparx5_tc_flower_replace(ndev, fco, admin);
+ case FLOW_CLS_DESTROY:
+ return sparx5_tc_flower_destroy(ndev, fco, admin);
+ case FLOW_CLS_STATS:
+ return sparx5_tc_flower_stats(ndev, fco, admin);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
new file mode 100644
index 000000000000..30dd61e5d150
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_tc.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+
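+/* A matchall filter with a single 'goto' action is used to enable the VCAP
+ * lookups on a port
+ */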
+static int sparx5_tc_matchall_replace(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct flow_action_entry *action;
+ struct sparx5 *sparx5;
+ int err;
+
+ if (!flow_offload_has_one_action(&tmo->rule->action)) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Only one action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+ action = &tmo->rule->action.entries[0];
+
+ sparx5 = port->sparx5;
+ switch (action->id) {
+ case FLOW_ACTION_GOTO:
+ err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
+ action->chain_index, tmo->cookie,
+ true);
+ if (err == -EFAULT) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Unsupported goto chain");
+ return -EOPNOTSUPP;
+ }
+ if (err == -EADDRINUSE) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "VCAP already enabled");
+ return -EOPNOTSUPP;
+ }
+ if (err) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Could not enable VCAP lookups");
+ return err;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
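+/* Disable the VCAP lookups enabled via the matchall filter cookie */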
+static int sparx5_tc_matchall_destroy(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5;
+ int err;
+
+ sparx5 = port->sparx5;
+ if (!tmo->rule && tmo->cookie) {
+ err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, 0,
+ tmo->cookie, false);
+ if (err)
+ return err;
+ return 0;
+ }
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ return -EOPNOTSUPP;
+}
+
+int sparx5_tc_matchall(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ if (!tc_cls_can_offload_and_chain0(ndev, &tmo->common)) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Only chain zero is supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (tmo->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return sparx5_tc_matchall_replace(ndev, tmo, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return sparx5_tc_matchall_destroy(ndev, tmo, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
new file mode 100644
index 000000000000..1bd987c664e8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
+ * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "vcap_api.h"
+#include "sparx5_vcap_ag_api.h"
+
+/* keyfields */
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 90,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 138,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 186,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 187,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 203,
+ .width = 64,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 267,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 283,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 284,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 86,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 136,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 137,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 138,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 139,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 140,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 142,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 206,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 207,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 202,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 221,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 222,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 226,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 169,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 193,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 91,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 220,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 228,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 244,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 112,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 116,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 119,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 121,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 169,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 227,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 355,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 483,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 484,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 485,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 486,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 502,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 518,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 534,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 535,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 536,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 537,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 538,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 539,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 540,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 541,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 542,
+ .width = 64,
+ },
+};
+
+/* keyfield_set */
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
+};
+
+/* actionfields */
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 5,
+ },
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_AF_RT_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 6,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 30,
+ .width = 68,
+ },
+ [VCAP_AF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 2,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 16,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 191,
+ .width = 12,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+};
+
+/* actionfield_set map size */
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+};
+
+/* Type Groups */
+static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [12] = is2_x12_keyfield_set_typegroups,
+ [6] = is2_x6_keyfield_set_typegroups,
+ [3] = is2_x3_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [3] = is2_x3_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+};
+
+/* VCAPs */
+const struct vcap_info sparx5_vcaps[] = {
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 73,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics sparx5_vcap_stats = {
+ .name = "sparx5",
+ .count = 1,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
new file mode 100644
index 000000000000..7d106f1276fe
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
+ * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+ */
+
+#ifndef __SPARX5_VCAP_AG_API_H__
+#define __SPARX5_VCAP_AG_API_H__
+
+/* VCAPs */
+extern const struct vcap_info sparx5_vcaps[];
+extern const struct vcap_statistics sparx5_vcap_stats;
+
+#endif /* __SPARX5_VCAP_AG_API_H__ */
+
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
new file mode 100644
index 000000000000..b91e05ffe2f4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver VCAP debugFS implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "sparx5_vcap_debugfs.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+#include "sparx5_vcap_ag_api.h"
+
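+/* Print the lookup state and keyset selection for each lookup on a port */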
+static void sparx5_vcap_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_CFG(port->portno));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ /* Get key selection state */
+ value = spx5_rd(sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(port->portno, lookup));
+
+ out->prf(out->dst, "\n noneth: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_NONETH_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_NONETH_CUSTOM_1:
+ out->prf(out->dst, "custom1");
+ break;
+ case VCAP_IS2_PS_NONETH_CUSTOM_2:
+ out->prf(out->dst, "custom2");
+ break;
+ case VCAP_IS2_PS_NONETH_NO_LOOKUP:
+ out->prf(out->dst, "none");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4_mc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_MC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP4_VID:
+ out->prf(out->dst, "ip4_vid");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4_uc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_UC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6_mc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_MC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_VID:
+ out->prf(out->dst, "ip6_vid");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ipv4_other");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6_uc: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_UC_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ }
+ out->prf(out->dst, "\n arp: ");
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_ARP_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_IS2_PS_ARP_ARP:
+ out->prf(out->dst, "arp");
+ break;
+ }
+ }
+ out->prf(out->dst, "\n");
+}
+
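+/* Print the sticky bits for each lookup and clear them afterwards */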
+static void sparx5_vcap_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " Sticky bits: ");
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+ /* Get lookup sticky bits */
+ value = spx5_rd(sparx5, ANA_ACL_SEC_LOOKUP_STICKY(lookup));
+
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_GET(value))
+ out->prf(out->dst, " sel_clm");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_IRLEG_STICKY_GET(value))
+ out->prf(out->dst, " sel_irleg");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_ERLEG_STICKY_GET(value))
+ out->prf(out->dst, " sel_erleg");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_PORT_STICKY_GET(value))
+ out->prf(out->dst, " sel_port");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM2_STICKY_GET(value))
+ out->prf(out->dst, " custom2");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_CUSTOM1_STICKY_GET(value))
+ out->prf(out->dst, " custom1");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_OAM_STICKY_GET(value))
+ out->prf(out->dst, " oam");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip6_vid");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(value))
+ out->prf(out->dst, " ip6_std");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip6_tcpudp");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(value))
+ out->prf(out->dst, " ip_7tuple");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip4_vid");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip4_tcpudp");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(value))
+ out->prf(out->dst, " ip4_other");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(value))
+ out->prf(out->dst, " arp");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_SNAP_STICKY_GET(value))
+ out->prf(out->dst, " mac_snap");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_LLC_STICKY_GET(value))
+ out->prf(out->dst, " mac_llc");
+ if (ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(value))
+ out->prf(out->dst, " mac_etype");
+ /* Clear stickies */
+ spx5_wr(value, sparx5, ANA_ACL_SEC_LOOKUP_STICKY(lookup));
+ }
+ out->prf(out->dst, "\n");
+}
+
+/* Provide port information via a callback interface */
+int sparx5_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ const struct vcap_info *vcap;
+ struct vcap_control *vctrl;
+
+ vctrl = sparx5->vcap_ctrl;
+ vcap = &vctrl->vcaps[admin->vtype];
+ out->prf(out->dst, "%s:\n", vcap->name);
+ sparx5_vcap_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_port_stickies(sparx5, admin, out);
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h
new file mode 100644
index 000000000000..f9ede03441f2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver VCAP implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_VCAP_DEBUGFS_H__
+#define __SPARX5_VCAP_DEBUGFS_H__
+
+#include <linux/netdevice.h>
+
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* Provide port information via a callback interface */
+int sparx5_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+
+#else
+
+static inline int sparx5_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __SPARX5_VCAP_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
new file mode 100644
index 000000000000..a0c126ba9a87
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver VCAP implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+
+#include "vcap_api_debugfs.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_vcap_impl.h"
+#include "sparx5_vcap_ag_api.h"
+#include "sparx5_vcap_debugfs.h"
+
+#define SUPER_VCAP_BLK_SIZE 3072 /* addresses per Super VCAP block */
+#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */
+
+#define SPARX5_IS2_LOOKUPS 4
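+/* Build the value of the per-port, per-lookup IS2 key selection register */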
+#define VCAP_IS2_KEYSEL(_ena, _noneth, _v4_mc, _v4_uc, _v6_mc, _v6_uc, _arp) \
+ (ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(_ena) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(_noneth) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(_v4_mc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(_v4_uc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(_v6_mc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
+ ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
+
+static struct sparx5_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int vinst; /* instance number within the same type */
+ int lookups; /* number of lookups in this vcap type */
+ int lookups_per_instance; /* number of lookups in this instance */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses, not in super vcap */
+ int map_id; /* id in the super vcap block mapping (if applicable) */
+ int blockno; /* starting block in super vcap (if applicable) */
+ int blocks; /* number of blocks in super vcap (if applicable) */
+} sparx5_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .vinst = 0,
+ .map_id = 4,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L0,
+ .last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
+ .blockno = 0, /* Maps block 0-1 */
+ .blocks = 2,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-1 */
+ .vinst = 1,
+ .map_id = 5,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L2,
+ .last_cid = SPARX5_VCAP_CID_IS2_MAX,
+ .blockno = 2, /* Maps block 2-3 */
+ .blocks = 2,
+ },
+};
+
+/* Await the super VCAP completion of the current operation */
+static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_SUPER_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_SUPER_CTRL);
+}
+
+/* Initializing a VCAP address range: only IS2 for now */
+static void _sparx5_vcap_range_init(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ u32 addr, u32 count)
+{
+ u32 size = count - 1;
+
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+}
+
+/* Initializing VCAP rule data area */
+static void sparx5_vcap_block_init(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ _sparx5_vcap_range_init(sparx5, admin, admin->first_valid_addr,
+ admin->last_valid_addr -
+ admin->first_valid_addr);
+}
+
+/* Get the keyset name from the sparx5 VCAP model */
+static const char *sparx5_vcap_keyset_name(struct net_device *ndev,
+ enum vcap_keyfield_set keyset)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ return vcap_keyset_name(port->sparx5->vcap_ctrl, keyset);
+}
+
+/* Check if this is the first lookup of IS2 */
+static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L1) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L2 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L3));
+}
+
+/* Set the narrow range ingress port mask on a rule */
+static void sparx5_vcap_add_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 port_mask;
+ u32 range;
+
+ range = port->portno / BITS_PER_TYPE(u32);
+ /* Port bit set to match-any */
+ port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32));
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_SEL, 0, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG, range, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0, port_mask);
+}
+
+/* Set the wide range ingress port mask on a rule */
+static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_u72_key port_mask;
+ u32 range;
+
+ /* Port bit set to match-any */
+ memset(port_mask.value, 0, sizeof(port_mask.value));
+ memset(port_mask.mask, 0xff, sizeof(port_mask.mask));
+ range = port->portno / BITS_PER_BYTE;
+ port_mask.mask[range] = ~BIT(port->portno % BITS_PER_BYTE);
+ vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask);
+}
+
+/* Convert chain id to vcap lookup id */
+static int sparx5_vcap_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ /* For now only handle IS2 */
+ if (cid >= SPARX5_VCAP_CID_IS2_L1 && cid < SPARX5_VCAP_CID_IS2_L2)
+ lookup = 1;
+ else if (cid >= SPARX5_VCAP_CID_IS2_L2 && cid < SPARX5_VCAP_CID_IS2_L3)
+ lookup = 2;
+ else if (cid >= SPARX5_VCAP_CID_IS2_L3 && cid < SPARX5_VCAP_CID_IS2_MAX)
+ lookup = 3;
+
+ return lookup;
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ /* Check if the port keyset selection is enabled */
+ value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ if (!ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(value))
+ return -ENOENT;
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_ARP_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_ARP_ARP:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_UC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_IS2_PS_IPV4_UC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ }
+
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV4_MC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_IS2_PS_IPV4_MC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_UC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ }
+
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_IPV6_MC_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_IS2_PS_IPV6_MC_IP6_VID:
+ /* Not used */
+ break;
+ }
+ }
+
+ if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
+ l3_proto != ETH_P_IPV6) {
+ switch (ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(value)) {
+ case VCAP_IS2_PS_NONETH_MAC_ETYPE:
+ /* IS2 non-classified frames generate MAC_ETYPE */
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Get the port keyset for the vcap lookup */
+int sparx5_vcap_get_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ struct vcap_keyset_list *kslist)
+{
+ int lookup;
+
+ lookup = sparx5_vcap_cid_to_lookup(cid);
+ return sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist, l3_proto);
+}
+
+/* API callback used for validating a field keyset (check the port keysets) */
+static enum vcap_keyfield_set
+sparx5_vcap_validate_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ struct vcap_keyset_list keysetlist = {};
+ enum vcap_keyfield_set keysets[10] = {};
+ int idx, jdx, lookup;
+
+ if (!kslist || kslist->cnt == 0)
+ return VCAP_KFS_NO_VALUE;
+
+ /* Get a list of currently configured keysets in the lookups */
+ lookup = sparx5_vcap_cid_to_lookup(rule->vcap_chain_id);
+ keysetlist.max = ARRAY_SIZE(keysets);
+ keysetlist.keysets = keysets;
+ sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist, l3_proto);
+
+ /* Check if there is a match and return the match */
+ for (idx = 0; idx < kslist->cnt; ++idx)
+ for (jdx = 0; jdx < keysetlist.cnt; ++jdx)
+ if (kslist->keysets[idx] == keysets[jdx])
+ return kslist->keysets[idx];
+
+ pr_err("%s:%d: %s not supported in port key selection\n",
+ __func__, __LINE__,
+ sparx5_vcap_keyset_name(ndev, kslist->keysets[0]));
+
+ return -ENOENT;
+}
+
+/* API callback used for adding default fields to a rule */
+static void sparx5_vcap_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ const struct vcap_field *field;
+
+ field = vcap_lookup_keyfield(rule, VCAP_KF_IF_IGR_PORT_MASK);
+ if (field && field->width == SPX5_PORTS)
+ sparx5_vcap_add_wide_port_mask(rule, ndev);
+ else if (field && field->width == BITS_PER_TYPE(u32))
+ sparx5_vcap_add_range_port_mask(rule, ndev);
+ else
+ pr_err("%s:%d: %s: could not add an ingress port mask for: %s\n",
+ __func__, __LINE__, netdev_name(ndev),
+ sparx5_vcap_keyset_name(ndev, rule->keyset));
+ /* add the lookup bit */
+ if (sparx5_vcap_is2_is_first_chain(rule))
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
+}
+
+/* API callback used for erasing the vcap cache area (not the register area) */
+static void sparx5_vcap_cache_erase(struct vcap_admin *admin)
+{
+ memset(admin->cache.keystream, 0, STREAMSIZE);
+ memset(admin->cache.maskstream, 0, STREAMSIZE);
+ memset(admin->cache.actionstream, 0, STREAMSIZE);
+ memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
+}
+
+/* API callback used for writing to the VCAP cache */
+static void sparx5_vcap_cache_write(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0xfff; /* counter limit */
+ if (admin->vinst == 0)
+ spx5_wr(admin->cache.counter, sparx5,
+ ANA_ACL_CNT_A(start));
+ else
+ spx5_wr(admin->cache.counter, sparx5,
+ ANA_ACL_CNT_B(start));
+ spx5_wr(admin->cache.sticky, sparx5,
+ VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for reading from the VCAP into the VCAP cache */
+static void sparx5_vcap_cache_read(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] = ~spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ }
+ if (sel & VCAP_SEL_ACTION) {
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0xfff; /* counter limit */
+ if (admin->vinst == 0)
+ admin->cache.counter =
+ spx5_rd(sparx5, ANA_ACL_CNT_A(start));
+ else
+ admin->cache.counter =
+ spx5_rd(sparx5, ANA_ACL_CNT_B(start));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for initializing a VCAP address range */
+static void sparx5_vcap_range_init(struct net_device *ndev,
+ struct vcap_admin *admin, u32 addr,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ _sparx5_vcap_range_init(sparx5, admin, addr, count);
+}
+
+/* API callback used for updating the VCAP cache */
+static void sparx5_vcap_update(struct net_device *ndev,
+ struct vcap_admin *admin, enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ bool clear;
+
+ clear = (cmd == VCAP_CMD_INITIALIZE);
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(0), sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+}
+
+/* API callback used for moving a block of rules in the VCAP */
+static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ enum vcap_command cmd;
+ u16 mv_num_pos;
+ u16 mv_size;
+
+ mv_size = count - 1;
+ if (offset > 0) {
+ mv_num_pos = offset - 1;
+ cmd = VCAP_CMD_MOVE_DOWN;
+ } else {
+ mv_num_pos = -offset - 1;
+ cmd = VCAP_CMD_MOVE_UP;
+ }
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+}
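(Editorial worked example, not part of the patch.) The move registers encode
the block length and move distance as "value minus one": moving a block of
three addresses by offset = +2 programs MV_SIZE = 2 and MV_NUM_POS = 1 with
the MOVE_DOWN command, while offset = -2 gives the same MV_NUM_POS with
MOVE_UP. Restated as code:

/* Encoding computed by sparx5_vcap_move() for count = 3, offset = +2 */
static void example_move_encoding(void)
{
	int offset = 2, count = 3;
	u16 mv_size = count - 1;	/* 2: the block spans 3 addresses */
	u16 mv_num_pos = offset - 1;	/* 1: move by 2 positions */
	enum vcap_command cmd = VCAP_CMD_MOVE_DOWN;	/* offset > 0 */

	(void)mv_size;
	(void)mv_num_pos;
	(void)cmd;
}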
+
+/* Enable all lookups in the VCAP instance */
+static int sparx5_vcap_enable(struct net_device *ndev,
+ struct vcap_admin *admin,
+ bool enable)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5;
+ int portno;
+
+ sparx5 = port->sparx5;
+ portno = port->portno;
+
+ /* For now we only consider IS2 */
+ if (enable)
+ spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
+ else
+ spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0), sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
+ return 0;
+}
+
+/* API callback operations: only IS2 is supported for now */
+static struct vcap_operations sparx5_vcap_ops = {
+ .validate_keyset = sparx5_vcap_validate_keyset,
+ .add_default_fields = sparx5_vcap_add_default_fields,
+ .cache_erase = sparx5_vcap_cache_erase,
+ .cache_write = sparx5_vcap_cache_write,
+ .cache_read = sparx5_vcap_cache_read,
+ .init = sparx5_vcap_range_init,
+ .update = sparx5_vcap_update,
+ .move = sparx5_vcap_move,
+ .port_info = sparx5_port_info,
+ .enable = sparx5_vcap_enable,
+};
+
+/* Enable lookups per port and set the keyset generation: only IS2 for now */
+static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+	/* Select the default port keyset generation for all lookups on all
+	 * ports: MAC_ETYPE for non-classified frames, IP4_TCP_UDP/IP4_OTHER
+	 * for IPv4, IP_7TUPLE for IPv6 and the ARP keyset for ARP frames
+	 */
+ keysel = VCAP_IS2_KEYSEL(true, VCAP_IS2_PS_NONETH_MAC_ETYPE,
+ VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV6_MC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
+ VCAP_IS2_PS_ARP_ARP);
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ spx5_wr(keysel, sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ }
+ }
+}
+
+/* Disable lookups per port and set the keyset generation: only IS2 for now */
+static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno;
+
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
+ ANA_ACL_VCAP_S2_CFG_SEC_ENA,
+ sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
+}
+
+static void sparx5_vcap_admin_free(struct vcap_admin *admin)
+{
+ if (!admin)
+ return;
+ mutex_destroy(&admin->lock);
+ kfree(admin->cache.keystream);
+ kfree(admin->cache.maskstream);
+ kfree(admin->cache.actionstream);
+ kfree(admin);
+}
+
+/* Allocate a vcap instance with a rule list and a cache area */
+static struct vcap_admin *
+sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl,
+ const struct sparx5_vcap_inst *cfg)
+{
+ struct vcap_admin *admin;
+
+ admin = kzalloc(sizeof(*admin), GFP_KERNEL);
+ if (!admin)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
+ admin->vtype = cfg->vtype;
+ admin->vinst = cfg->vinst;
+ admin->lookups = cfg->lookups;
+ admin->lookups_per_instance = cfg->lookups_per_instance;
+ admin->first_cid = cfg->first_cid;
+ admin->last_cid = cfg->last_cid;
+ admin->cache.keystream =
+ kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.maskstream =
+ kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.actionstream =
+ kzalloc(STREAMSIZE, GFP_KERNEL);
+ if (!admin->cache.keystream || !admin->cache.maskstream ||
+ !admin->cache.actionstream) {
+ sparx5_vcap_admin_free(admin);
+ return ERR_PTR(-ENOMEM);
+ }
+ return admin;
+}
+
+/* Do block allocations and provide addresses for VCAP instances */
+static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ const struct sparx5_vcap_inst *cfg)
+{
+ int idx;
+
+ /* Super VCAP block mapping and address configuration. Block 0
+ * is assigned addresses 0 through 3071, block 1 is assigned
+	 * addresses 3072 through 6143, and so on.
+ */
+ for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks; ++idx) {
+ spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_SUPER_IDX);
+ spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id), sparx5,
+ VCAP_SUPER_MAP);
+ }
+ admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
+ admin->last_used_addr = admin->first_valid_addr +
+ cfg->blocks * SUPER_VCAP_BLK_SIZE;
+ admin->last_valid_addr = admin->last_used_addr - 1;
+}
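(Editorial worked example, assuming SUPER_VCAP_BLK_SIZE is 3072 as the comment
above implies.) An instance configured with blockno = 2 and blocks = 2 maps
core blocks 2 and 3 to its map_id and spans VCAP addresses 6144-12287:

/* Address window derived by sparx5_vcap_block_alloc() for blockno=2, blocks=2 */
u32 first_valid_addr = 2 * 3072;			/* 6144 */
u32 last_used_addr = first_valid_addr + 2 * 3072;	/* 12288 */
u32 last_valid_addr = last_used_addr - 1;		/* 12287 */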
+
+/* Allocate a vcap control and vcap instances and configure the system */
+int sparx5_vcap_init(struct sparx5 *sparx5)
+{
+ const struct sparx5_vcap_inst *cfg;
+ struct vcap_control *ctrl;
+ struct vcap_admin *admin;
+ struct dentry *dir;
+ int err = 0, idx;
+
+ /* Create a VCAP control instance that owns the platform specific VCAP
+ * model with VCAP instances and information about keysets, keys,
+ * actionsets and actions
+ * - Create administrative state for each available VCAP
+ * - Lists of rules
+ * - Address information
+ * - Initialize VCAP blocks
+ * - Configure port keysets
+ */
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ sparx5->vcap_ctrl = ctrl;
+ /* select the sparx5 VCAP model */
+ ctrl->vcaps = sparx5_vcaps;
+ ctrl->stats = &sparx5_vcap_stats;
+ /* Setup callbacks to allow the API to use the VCAP HW */
+ ctrl->ops = &sparx5_vcap_ops;
+
+ INIT_LIST_HEAD(&ctrl->list);
+ for (idx = 0; idx < ARRAY_SIZE(sparx5_vcap_inst_cfg); ++idx) {
+ cfg = &sparx5_vcap_inst_cfg[idx];
+ admin = sparx5_vcap_admin_alloc(sparx5, ctrl, cfg);
+ if (IS_ERR(admin)) {
+ err = PTR_ERR(admin);
+ pr_err("%s:%d: vcap allocation failed: %d\n",
+ __func__, __LINE__, err);
+ return err;
+ }
+ sparx5_vcap_block_alloc(sparx5, admin, cfg);
+ sparx5_vcap_block_init(sparx5, admin);
+ if (cfg->vinst == 0)
+ sparx5_vcap_port_key_selection(sparx5, admin);
+ list_add_tail(&admin->list, &ctrl->list);
+ }
+ dir = vcap_debugfs(sparx5->dev, sparx5->debugfs_root, ctrl);
+ for (idx = 0; idx < SPX5_PORTS; ++idx)
+ if (sparx5->ports[idx])
+ vcap_port_debugfs(sparx5->dev, dir, ctrl,
+ sparx5->ports[idx]->ndev);
+
+ return err;
+}
+
+void sparx5_vcap_destroy(struct sparx5 *sparx5)
+{
+ struct vcap_control *ctrl = sparx5->vcap_ctrl;
+ struct vcap_admin *admin, *admin_next;
+
+ if (!ctrl)
+ return;
+
+ list_for_each_entry_safe(admin, admin_next, &ctrl->list, list) {
+ sparx5_vcap_port_key_deselection(sparx5, admin);
+ vcap_del_rules(ctrl, admin);
+ list_del(&admin->list);
+ sparx5_vcap_admin_free(admin);
+ }
+ kfree(ctrl);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
new file mode 100644
index 000000000000..0a0f2412c980
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver VCAP implementation
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+
+#ifndef __SPARX5_VCAP_IMPL_H__
+#define __SPARX5_VCAP_IMPL_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#define SPARX5_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
+#define SPARX5_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
+#define SPARX5_VCAP_CID_IS2_L2 VCAP_CID_INGRESS_STAGE2_L2 /* IS2 lookup 2 */
+#define SPARX5_VCAP_CID_IS2_L3 VCAP_CID_INGRESS_STAGE2_L3 /* IS2 lookup 3 */
+#define SPARX5_VCAP_CID_IS2_MAX \
+ (VCAP_CID_INGRESS_STAGE2_L3 + VCAP_CID_LOOKUP_SIZE - 1) /* IS2 Max */
+
+/* IS2 port keyset selection control */
+
+/* IS2 non-ethernet traffic type keyset generation */
+enum vcap_is2_port_sel_noneth {
+ VCAP_IS2_PS_NONETH_MAC_ETYPE,
+ VCAP_IS2_PS_NONETH_CUSTOM_1,
+ VCAP_IS2_PS_NONETH_CUSTOM_2,
+ VCAP_IS2_PS_NONETH_NO_LOOKUP
+};
+
+/* IS2 IPv4 unicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv4_uc {
+ VCAP_IS2_PS_IPV4_UC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV4_UC_IP_7TUPLE,
+};
+
+/* IS2 IPv4 multicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv4_mc {
+ VCAP_IS2_PS_IPV4_MC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
+ VCAP_IS2_PS_IPV4_MC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV4_MC_IP4_VID,
+};
+
+/* IS2 IPv6 unicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv6_uc {
+ VCAP_IS2_PS_IPV6_UC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV6_UC_IP6_STD,
+ VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER,
+};
+
+/* IS2 IPv6 multicast traffic type keyset generation */
+enum vcap_is2_port_sel_ipv6_mc {
+ VCAP_IS2_PS_IPV6_MC_MAC_ETYPE,
+ VCAP_IS2_PS_IPV6_MC_IP_7TUPLE,
+ VCAP_IS2_PS_IPV6_MC_IP6_VID,
+ VCAP_IS2_PS_IPV6_MC_IP6_STD,
+ VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER,
+};
+
+/* IS2 ARP traffic type keyset generation */
+enum vcap_is2_port_sel_arp {
+ VCAP_IS2_PS_ARP_MAC_ETYPE,
+ VCAP_IS2_PS_ARP_ARP,
+};
+
+/* Get the port keyset for the vcap lookup */
+int sparx5_vcap_get_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ struct vcap_keyset_list *kslist);
+
+#endif /* __SPARX5_VCAP_IMPL_H__ */
diff --git a/drivers/net/ethernet/microchip/vcap/Kconfig b/drivers/net/ethernet/microchip/vcap/Kconfig
new file mode 100644
index 000000000000..97f43fd4473f
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/Kconfig
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip VCAP API configuration
+#
+
+if NET_VENDOR_MICROCHIP
+
+config VCAP
+ bool "VCAP (Versatile Content-Aware Processor) library"
+ help
+ Provides the basic VCAP functionality for multiple Microchip switchcores
+
+ A VCAP is essentially a TCAM with rules consisting of
+
+ - Programmable key fields
+ - Programmable action fields
+ - A counter (which may be only one bit wide)
+
+ Besides this each VCAP has:
+
+ - A number of lookups
+ - A keyset configuration per port per lookup
+
+ The VCAP implementation provides switchcore independent handling of rules
+ and supports:
+
+ - Creating and deleting rules
+ - Updating and getting rules
+
+ The platform specific configuration as well as the platform specific model
+ of the VCAP instances are attached to the VCAP API and a client can then
+ access rules via the API in a platform independent way, with the
+ limitations that each VCAP has in terms of its supported keys and actions.
+
+ Different switchcores will have different VCAP instances with different
+ characteristics. Look in the datasheet for the VCAP specifications for the
+ specific switchcore.
+
+config VCAP_KUNIT_TEST
+ bool "KUnit test for VCAP library" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+	depends on KUNIT=y && VCAP=y
+ select DEBUG_FS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the VCAP library.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_MICROCHIP
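(Editorial sketch, not part of the patch.) To make the rule model described in
the help text concrete: a client builds a rule from key fields, action fields
and a priority, and hands it to the VCAP library, which picks a keyset allowed
by the port configuration. The client entry points live in vcap_api_client.h,
which is not part of this hunk, so the names and signatures below
(vcap_alloc_rule, vcap_rule_add_key_u32, vcap_rule_add_action_bit,
vcap_add_rule, vcap_free_rule, VCAP_USER_TC) are assumptions:

/* Assumed client API usage: copy PTP frames (EtherType 0x88f7) to the CPU */
static int example_add_etype_rule(struct vcap_control *vctrl,
				  struct net_device *ndev, int chain_id)
{
	struct vcap_rule *rule;
	int err;

	rule = vcap_alloc_rule(vctrl, ndev, chain_id, VCAP_USER_TC, 10, 0);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	err = vcap_rule_add_key_u32(rule, VCAP_KF_ETYPE, 0x88f7, 0xffff);
	if (!err)
		err = vcap_rule_add_action_bit(rule, VCAP_AF_CPU_COPY_ENA,
					       VCAP_BIT_1);
	if (!err)
		err = vcap_add_rule(rule);

	vcap_free_rule(rule);
	return err;
}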
diff --git a/drivers/net/ethernet/microchip/vcap/Makefile b/drivers/net/ethernet/microchip/vcap/Makefile
new file mode 100644
index 000000000000..0adb8f5a8735
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip VCAP API
+#
+
+obj-$(CONFIG_VCAP) += vcap.o
+obj-$(CONFIG_VCAP_KUNIT_TEST) += vcap_model_kunit.o
+vcap-$(CONFIG_DEBUG_FS) += vcap_api_debugfs.o
+
+vcap-y += vcap_api.o
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
new file mode 100644
index 000000000000..84de2aee4169
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -0,0 +1,735 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
+ * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+ */
+
+#ifndef __VCAP_AG_API__
+#define __VCAP_AG_API__
+
+enum vcap_type {
+ VCAP_TYPE_ES2,
+ VCAP_TYPE_IS0,
+ VCAP_TYPE_IS2,
+ VCAP_TYPE_MAX
+};
+
+/* Keyfieldset names with origin information */
+enum vcap_keyfield_set {
+ VCAP_KFS_NO_VALUE, /* initial value */
+ VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_ETAG, /* sparx5 is0 X2 */
+ VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_IP4_VID, /* sparx5 es2 X3 */
+ VCAP_KFS_IP6_STD, /* sparx5 is2 X6 */
+ VCAP_KFS_IP6_VID, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_IP_7TUPLE, /* sparx5 is2 X12, sparx5 es2 X12 */
+ VCAP_KFS_LL_FULL, /* sparx5 is0 X6 */
+ VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_MLL, /* sparx5 is0 X3 */
+ VCAP_KFS_NORMAL, /* sparx5 is0 X6 */
+ VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */
+ VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */
+ VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */
+ VCAP_KFS_TRI_VID, /* sparx5 is0 X2 */
+ VCAP_KFS_MAC_LLC, /* lan966x is2 X2 */
+ VCAP_KFS_MAC_SNAP, /* lan966x is2 X2 */
+ VCAP_KFS_OAM, /* lan966x is2 X2 */
+ VCAP_KFS_IP6_TCP_UDP, /* lan966x is2 X4 */
+ VCAP_KFS_IP6_OTHER, /* lan966x is2 X4 */
+ VCAP_KFS_SMAC_SIP4, /* lan966x is2 X1 */
+ VCAP_KFS_SMAC_SIP6, /* lan966x is2 X2 */
+};
+
+/* List of keyfields with description
+ *
+ * Keys ending in _IS are booleans derived from frame data
+ * Keys ending in _CLS are classified frame data
+ *
+ * VCAP_KF_8021BR_ECID_BASE: W12, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021BR_ECID_EXT: W8, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021BR_E_TAGGED: W1, sparx5: is0
+ * Set for frames containing an E-TAG (802.1BR Ethertype 893f)
+ * VCAP_KF_8021BR_GRP: W2, sparx5: is0
+ * E-Tag group bits in 802.1BR Bridge Port Extension
+ * VCAP_KF_8021BR_IGR_ECID_BASE: W12, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021BR_IGR_ECID_EXT: W8, sparx5: is0
+ * Used by 802.1BR Bridge Port Extension in an E-Tag
+ * VCAP_KF_8021Q_DEI0: W1, sparx5: is0
+ * First DEI in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_DEI1: W1, sparx5: is0
+ * Second DEI in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_DEI2: W1, sparx5: is0
+ * Third DEI in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_DEI_CLS: W1, sparx5: is2/es2, lan966x: is2
+ * Classified DEI
+ * VCAP_KF_8021Q_PCP0: W3, sparx5: is0
+ * First PCP in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_PCP1: W3, sparx5: is0
+ * Second PCP in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_PCP2: W3, sparx5: is0
+ * Third PCP in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_PCP_CLS: W3, sparx5: is2/es2, lan966x: is2
+ * Classified PCP
+ * VCAP_KF_8021Q_TPID0: W3, sparx5: is0
+ * First TPID in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_TPID1: W3, sparx5: is0
+ * Second TPID in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_TPID2: W3, sparx5: is0
+ * Third TPID in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_VID0: W12, sparx5: is0
+ * First VID in multiple vlan tags (outer tag or default port tag)
+ * VCAP_KF_8021Q_VID1: W12, sparx5: is0
+ * Second VID in multiple vlan tags (inner tag)
+ * VCAP_KF_8021Q_VID2: W12, sparx5: is0
+ * Third VID in multiple vlan tags (not always available)
+ * VCAP_KF_8021Q_VID_CLS: W13, sparx5: is2/es2, lan966x is2 W12
+ * Classified VID
+ * VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has
+ * one or more Q-tags. Independent of port VLAN awareness
+ * VCAP_KF_8021Q_VLAN_TAGS: W3, sparx5: is0
+ * Number of VLAN tags in frame: 0: Untagged, 1: Single tagged, 3: Double
+ * tagged, 7: Triple tagged
+ * VCAP_KF_ACL_GRP_ID: W8, sparx5: es2
+ * Used in interface map table
+ * VCAP_KF_ARP_ADDR_SPACE_OK_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if hardware address is Ethernet
+ * VCAP_KF_ARP_LEN_OK_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if hardware address length = 6 (Ethernet) and IP address length = 4 (IP).
+ * VCAP_KF_ARP_OPCODE: W2, sparx5: is2/es2, lan966x: is2
+ * ARP opcode
+ * VCAP_KF_ARP_OPCODE_UNKNOWN_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if not one of the codes defined in VCAP_KF_ARP_OPCODE
+ * VCAP_KF_ARP_PROTO_SPACE_OK_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if protocol address space is 0x0800
+ * VCAP_KF_ARP_SENDER_MATCH_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Sender Hardware Address = SMAC (ARP)
+ * VCAP_KF_ARP_TGT_MATCH_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Target Hardware Address = SMAC (RARP)
+ * VCAP_KF_COSID_CLS: W3, sparx5: es2
+ * Class of service
+ * VCAP_KF_DST_ENTRY: W1, sparx5: is0
+ * Selects whether the frame’s destination or source information is used for
+ * fields L2_SMAC and L3_IP4_SIP
+ * VCAP_KF_ES0_ISDX_KEY_ENA: W1, sparx5: es2
+ * The value taken from the IFH .FWD.ES0_ISDX_KEY_ENA
+ * VCAP_KF_ETYPE: W16, sparx5: is0/is2/es2, lan966x: is2
+ * Ethernet type
+ * VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is0/is2/es2
+ * Set if frame has EtherType >= 0x600
+ * VCAP_KF_ETYPE_MPLS: W2, sparx5: is0
+ * Type of MPLS Ethertype (or not)
+ * VCAP_KF_IF_EGR_PORT_MASK: W32, sparx5: es2
+ * Egress port mask, one bit per port
+ * VCAP_KF_IF_EGR_PORT_MASK_RNG: W3, sparx5: es2
+ * Select which 32 port group is available in IF_EGR_PORT (or virtual ports or
+ * CPU queue)
+ * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9
+ * Sparx5: Logical ingress port number retrieved from
+ * ANA_CL::PORT_ID_CFG.LPORT_NUM or ERLEG, LAN966x: ingress port number
+ * VCAP_KF_IF_IGR_PORT_MASK: sparx5 is0 W65, sparx5 is2 W32, sparx5 is2 W65,
+ * lan966x is2 W9
+ * Ingress port mask, one bit per port/erleg
+ * VCAP_KF_IF_IGR_PORT_MASK_L3: W1, sparx5: is2
+ * If set, IF_IGR_PORT_MASK, IF_IGR_PORT_MASK_RNG, and IF_IGR_PORT_MASK_SEL are
+ * used to specify L3 interfaces
+ * VCAP_KF_IF_IGR_PORT_MASK_RNG: W4, sparx5: is2
+ * Range selector for IF_IGR_PORT_MASK. Specifies which group of 32 ports are
+ * available in IF_IGR_PORT_MASK
+ * VCAP_KF_IF_IGR_PORT_MASK_SEL: W2, sparx5: is0/is2
+ * Mode selector for IF_IGR_PORT_MASK, applicable when IF_IGR_PORT_MASK_L3 == 0.
+ * Mapping: 0: DEFAULT 1: LOOPBACK 2: MASQUERADE 3: CPU_VD
+ * VCAP_KF_IF_IGR_PORT_SEL: W1, sparx5: es2
+ * Selector for IF_IGR_PORT: physical port number or ERLEG
+ * VCAP_KF_IP4_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame has EtherType = 0x800 and IP version = 4
+ * VCAP_KF_IP_MC_IS: W1, sparx5: is0
+ * Set if frame is IPv4 frame and frame’s destination MAC address is an IPv4
+ * multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame’s
+ * destination MAC address is an IPv6 multicast address (0x3333/16).
+ * VCAP_KF_IP_PAYLOAD_5TUPLE: W32, sparx5: is0
+ * Payload bytes after IP header
+ * VCAP_KF_IP_SNAP_IS: W1, sparx5: is0
+ * Set if frame is IPv4, IPv6, or SNAP frame
+ * VCAP_KF_ISDX_CLS: W12, sparx5: is2/es2
+ * Classified ISDX
+ * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if classified ISDX > 0
+ * VCAP_KF_L2_BC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame’s destination MAC address is the broadcast address
+ * (FF-FF-FF-FF-FF-FF).
+ * VCAP_KF_L2_DMAC: W48, sparx5: is0/is2/es2, lan966x: is2
+ * Destination MAC address
+ * VCAP_KF_L2_FWD_IS: W1, sparx5: is2
+ * Set if the frame is allowed to be forwarded to front ports
+ * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame’s destination MAC address is a multicast address (bit 40 = 1).
+ * VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2/es2
+ * Byte 0-7 of L2 payload after Type/Len field and overloading for OAM
+ * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x is2
+ * Source MAC address
+ * VCAP_KF_L3_DIP_EQ_SIP_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if Src IP matches Dst IP address
+ * VCAP_KF_L3_DMAC_DIP_MATCH: W1, sparx5: is2
+ * Match found in DIP security lookup in ANA_L3
+ * VCAP_KF_L3_DPL_CLS: W1, sparx5: es2
+ * The frames drop precedence level
+ * VCAP_KF_L3_DSCP: W6, sparx5: is0
+ * Frame’s DSCP value
+ * VCAP_KF_L3_DST_IS: W1, sparx5: is2
+ * Set if lookup is done for egress router leg
+ * VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is0/is2/es2
+ * L3 Fragmentation type (none, initial, suspicious, valid follow up)
+ * VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is0/is2
+ * Set if frame's L4 length is less than
+ * ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_LEN
+ * VCAP_KF_L3_IP4_DIP: W32, sparx5: is0/is2/es2, lan966x: is2
+ * Destination IPv4 Address
+ * VCAP_KF_L3_IP4_SIP: W32, sparx5: is0/is2/es2, lan966x: is2
+ * Source IPv4 Address
+ * VCAP_KF_L3_IP6_DIP: W128, sparx5: is0/is2/es2, lan966x: is2
+ * Sparx5: Full IPv6 DIP, LAN966x: Either Full IPv6 DIP or a subset depending on
+ * frame type
+ * VCAP_KF_L3_IP6_SIP: W128, sparx5: is0/is2/es2, lan966x: is2
+ * Sparx5: Full IPv6 SIP, LAN966x: Either Full IPv6 SIP or a subset depending on
+ * frame type
+ * VCAP_KF_L3_IP_PROTO: W8, sparx5: is0/is2/es2, lan966x: is2
+ * IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4
+ * VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if IPv4 frame contains options (IP len > 5)
+ * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96,
+ * lan966x is2 W56
+ * Sparx5: Payload bytes after IP header. IPv4: IPv4 options are not parsed so
+ * payload is always taken 20 bytes after the start of the IPv4 header, LAN966x:
+ * Bytes 0-6 after IP header
+ * VCAP_KF_L3_RT_IS: W1, sparx5: is2/es2
+ * Set if frame has hit a router leg
+ * VCAP_KF_L3_SMAC_SIP_MATCH: W1, sparx5: is2
+ * Match found in SIP security lookup in ANA_L3
+ * VCAP_KF_L3_TOS: W8, sparx5: is2/es2, lan966x: is2
+ * Sparx5: Frame's IPv4/IPv6 DSCP and ECN fields, LAN966x: IP TOS field
+ * VCAP_KF_L3_TTL_GT0: W1, sparx5: is2/es2, lan966x: is2
+ * Set if IPv4 TTL / IPv6 hop limit is greater than 0
+ * VCAP_KF_L4_ACK: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5 and LAN966x: TCP flag ACK, LAN966x only: PTP over UDP: flagField bit 2
+ * (unicastFlag)
+ * VCAP_KF_L4_DPORT: W16, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP/UDP destination port. Overloading for IP_7TUPLE: Non-TCP/UDP IP
+ * frames: L4_DPORT = L3_IP_PROTO, LAN966x: TCP/UDP destination port
+ * VCAP_KF_L4_FIN: W1, sparx5: is2/es2
+ * TCP flag FIN, LAN966x: TCP flag FIN, and for PTP over UDP: messageType bit 1
+ * VCAP_KF_L4_PAYLOAD: W64, sparx5: is2/es2
+ * Payload bytes after TCP/UDP header Overloading for IP_7TUPLE: Non TCP/UDP
+ * frames: Payload bytes 0–7 after IP header. IPv4 options are not parsed so
+ * payload is always taken 20 bytes after the start of the IPv4 header for non
+ * TCP/UDP IPv4 frames
+ * VCAP_KF_L4_PSH: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit
+ * 1 (twoStepFlag)
+ * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x: is2
+ * Range checker bitmask (one for each range checker). Input into range checkers
+ * is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE,
+ * outer VID, inner VID)
+ * VCAP_KF_L4_RST: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag RST, LAN966x: TCP: TCP flag RST. PTP over UDP: messageType
+ * bit 3
+ * VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP:
+ * messageType bit 0
+ * VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is2
+ * TCP/UDP source port
+ * VCAP_KF_L4_SPORT_EQ_DPORT_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if UDP or TCP source port equals UDP or TCP destination port
+ * VCAP_KF_L4_SYN: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag SYN, LAN966x: TCP: TCP flag SYN. PTP over UDP: messageType
+ * bit 2
+ * VCAP_KF_L4_URG: W1, sparx5: is2/es2, lan966x: is2
+ * Sparx5: TCP flag URG, LAN966x: TCP: TCP flag URG. PTP over UDP: flagField bit
+ * 7 (reserved)
+ * VCAP_KF_LOOKUP_FIRST_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Selects between entries relevant for first and second lookup. Set for first
+ * lookup, cleared for second lookup.
+ * VCAP_KF_LOOKUP_GEN_IDX: W12, sparx5: is0
+ * Generic index - for chaining CLM instances
+ * VCAP_KF_LOOKUP_GEN_IDX_SEL: W2, sparx5: is0
+ * Select the mode of the Generic Index
+ * VCAP_KF_LOOKUP_PAG: W8, sparx5: is2, lan966x: is2
+ * Classified Policy Association Group: chains rules from IS1/CLM to IS2
+ * VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2/es2, lan966x: is2
+ * Dual-ended loss measurement counters in CCM frames are all zero
+ * VCAP_KF_OAM_MEL_FLAGS: W7, sparx5: is0, lan966x: is2
+ * Encoding of MD level/MEG level (MEL)
+ * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame’s EtherType = 0x8902
+ * VCAP_KF_PROT_ACTIVE: W1, sparx5: es2
+ * Protection is active
+ * VCAP_KF_TCP_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next
+ * header = 6)
+ * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6
+ * or 17)
+ * VCAP_KF_TYPE: sparx5 is0 W2, sparx5 is0 W1, sparx5 is2 W4, sparx5 is2 W2,
+ * sparx5 es2 W3, lan966x: is2
+ * Keyset type id - set by the API
+ * VCAP_KF_HOST_MATCH: W1, lan966x: is2
+ * The action from the SMAC_SIP4 or SMAC_SIP6 lookups. Used for IP source
+ * guarding.
+ * VCAP_KF_L2_FRM_TYPE: W4, lan966x: is2
+ * Frame subtype for specific EtherTypes (MRP, DLR)
+ * VCAP_KF_L2_PAYLOAD0: W16, lan966x: is2
+ * Payload bytes 0-1 after the frame’s EtherType
+ * VCAP_KF_L2_PAYLOAD1: W8, lan966x: is2
+ * Payload byte 4 after the frame’s EtherType. This is specifically for PTP
+ * frames.
+ * VCAP_KF_L2_PAYLOAD2: W3, lan966x: is2
+ * Bits 7, 2, and 1 from payload byte 6 after the frame’s EtherType. This is
+ * specifically for PTP frames.
+ * VCAP_KF_L2_LLC: W40, lan966x: is2
+ * LLC header and data after up to two VLAN tags and the type/length field
+ * VCAP_KF_L3_FRAGMENT: W1, lan966x: is2
+ * Set if IPv4 frame is fragmented
+ * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is2
+ * Set if IPv4 frame is fragmented and it is not the first fragment
+ * VCAP_KF_L2_SNAP: W40, lan966x: is2
+ * SNAP header after LLC header (AA-AA-03)
+ * VCAP_KF_L4_1588_DOM: W8, lan966x: is2
+ * PTP over UDP: domainNumber
+ * VCAP_KF_L4_1588_VER: W4, lan966x: is2
+ * PTP over UDP: version
+ * VCAP_KF_OAM_MEPID: W16, lan966x: is2
+ * CCM frame’s OAM MEP ID
+ * VCAP_KF_OAM_OPCODE: W8, lan966x: is2
+ * Frame’s OAM opcode
+ * VCAP_KF_OAM_VER: W5, lan966x: is2
+ * Frame’s OAM version
+ * VCAP_KF_OAM_FLAGS: W8, lan966x: is2
+ * Frame’s OAM flags
+ * VCAP_KF_OAM_DETECTED: W1, lan966x: is2
+ * This is missing in the datasheet, but present in the OAM keyset in XML
+ */
+
+/* Keyfield names */
+enum vcap_key_field {
+ VCAP_KF_NO_VALUE, /* initial value */
+ VCAP_KF_8021BR_ECID_BASE,
+ VCAP_KF_8021BR_ECID_EXT,
+ VCAP_KF_8021BR_E_TAGGED,
+ VCAP_KF_8021BR_GRP,
+ VCAP_KF_8021BR_IGR_ECID_BASE,
+ VCAP_KF_8021BR_IGR_ECID_EXT,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_8021Q_VLAN_TAGGED_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_ACL_GRP_ID,
+ VCAP_KF_ARP_ADDR_SPACE_OK_IS,
+ VCAP_KF_ARP_LEN_OK_IS,
+ VCAP_KF_ARP_OPCODE,
+ VCAP_KF_ARP_OPCODE_UNKNOWN_IS,
+ VCAP_KF_ARP_PROTO_SPACE_OK_IS,
+ VCAP_KF_ARP_SENDER_MATCH_IS,
+ VCAP_KF_ARP_TGT_MATCH_IS,
+ VCAP_KF_COSID_CLS,
+ VCAP_KF_DST_ENTRY,
+ VCAP_KF_ES0_ISDX_KEY_ENA,
+ VCAP_KF_ETYPE,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_ETYPE_MPLS,
+ VCAP_KF_IF_EGR_PORT_MASK,
+ VCAP_KF_IF_EGR_PORT_MASK_RNG,
+ VCAP_KF_IF_IGR_PORT,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_IF_IGR_PORT_MASK_L3,
+ VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_SEL,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_IP_PAYLOAD_5TUPLE,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_ISDX_CLS,
+ VCAP_KF_ISDX_GT0_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_FWD_IS,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_PAYLOAD_ETYPE,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_L3_DIP_EQ_SIP_IS,
+ VCAP_KF_L3_DMAC_DIP_MATCH,
+ VCAP_KF_L3_DPL_CLS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_L3_DST_IS,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_IP4_DIP,
+ VCAP_KF_L3_IP4_SIP,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_L3_IP_PROTO,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_PAYLOAD,
+ VCAP_KF_L3_RT_IS,
+ VCAP_KF_L3_SMAC_SIP_MATCH,
+ VCAP_KF_L3_TOS,
+ VCAP_KF_L3_TTL_GT0,
+ VCAP_KF_L4_ACK,
+ VCAP_KF_L4_DPORT,
+ VCAP_KF_L4_FIN,
+ VCAP_KF_L4_PAYLOAD,
+ VCAP_KF_L4_PSH,
+ VCAP_KF_L4_RNG,
+ VCAP_KF_L4_RST,
+ VCAP_KF_L4_SEQUENCE_EQ0_IS,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_SPORT_EQ_DPORT_IS,
+ VCAP_KF_L4_SYN,
+ VCAP_KF_L4_URG,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_PAG,
+ VCAP_KF_MIRROR_ENA,
+ VCAP_KF_OAM_CCM_CNTS_EQ0,
+ VCAP_KF_OAM_MEL_FLAGS,
+ VCAP_KF_OAM_Y1731_IS,
+ VCAP_KF_PROT_ACTIVE,
+ VCAP_KF_TCP_IS,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TYPE,
+ VCAP_KF_HOST_MATCH,
+ VCAP_KF_L2_FRM_TYPE,
+ VCAP_KF_L2_PAYLOAD0,
+ VCAP_KF_L2_PAYLOAD1,
+ VCAP_KF_L2_PAYLOAD2,
+ VCAP_KF_L2_LLC,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_KF_L2_SNAP,
+ VCAP_KF_L4_1588_DOM,
+ VCAP_KF_L4_1588_VER,
+ VCAP_KF_OAM_MEPID,
+ VCAP_KF_OAM_OPCODE,
+ VCAP_KF_OAM_VER,
+ VCAP_KF_OAM_FLAGS,
+ VCAP_KF_OAM_DETECTED,
+};
+
+/* Actionset names with origin information */
+enum vcap_actionfield_set {
+ VCAP_AFS_NO_VALUE, /* initial value */
+ VCAP_AFS_BASE_TYPE, /* sparx5 is2 X3, sparx5 es2 X3, lan966x is2 X2 */
+ VCAP_AFS_CLASSIFICATION, /* sparx5 is0 X2 */
+ VCAP_AFS_CLASS_REDUCED, /* sparx5 is0 X1 */
+ VCAP_AFS_FULL, /* sparx5 is0 X3 */
+ VCAP_AFS_MLBS, /* sparx5 is0 X2 */
+ VCAP_AFS_MLBS_REDUCED, /* sparx5 is0 X1 */
+	VCAP_AFS_SMAC_SIP, /* lan966x is2 X1 */
+};
+
+/* List of actionfields with description
+ *
+ * VCAP_AF_CLS_VID_SEL: W3, sparx5: is0
+ * Controls the classified VID: 0: VID_NONE: No action. 1: VID_ADD: New VID =
+ * old VID + VID_VAL. 2: VID_REPLACE: New VID = VID_VAL. 3: VID_FIRST_TAG: New
+ * VID = VID from frame's first tag (outer tag) if available, otherwise VID_VAL.
+ * 4: VID_SECOND_TAG: New VID = VID from frame's second tag (middle tag) if
+ * available, otherwise VID_VAL. 5: VID_THIRD_TAG: New VID = VID from frame's
+ * third tag (inner tag) if available, otherwise VID_VAL.
+ * VCAP_AF_CNT_ID: sparx5 is2 W12, sparx5 es2 W11
+ * Counter ID, used per lookup to index the 4K frame counters (ANA_ACL:CNT_TBL).
+ * Multiple VCAP IS2 entries can use the same counter.
+ * VCAP_AF_COPY_PORT_NUM: W7, sparx5: es2
+ * QSYS port number when FWD_MODE is redirect or copy
+ * VCAP_AF_COPY_QUEUE_NUM: W16, sparx5: es2
+ * QSYS queue number when FWD_MODE is redirect or copy
+ * VCAP_AF_CPU_COPY_ENA: W1, sparx5: is2/es2, lan966x: is2
+ * Setting this bit to 1 causes all frames that hit this action to be copied to
+ * the CPU extraction queue specified in CPU_QUEUE_NUM.
+ * VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2/es2, lan966x: is2
+ * CPU queue number. Used when CPU_COPY_ENA is set.
+ * VCAP_AF_DEI_ENA: W1, sparx5: is0
+ * If set, use DEI_VAL as classified DEI value. Otherwise, DEI from basic
+ * classification is used
+ * VCAP_AF_DEI_VAL: W1, sparx5: is0
+ * See DEI_ENA
+ * VCAP_AF_DP_ENA: W1, sparx5: is0
+ * If set, use DP_VAL as classified drop precedence level. Otherwise, drop
+ * precedence level from basic classification is used.
+ * VCAP_AF_DP_VAL: W2, sparx5: is0
+ * See DP_ENA.
+ * VCAP_AF_DSCP_ENA: W1, sparx5: is0
+ * If set, use DSCP_VAL as classified DSCP value. Otherwise, DSCP value from
+ * basic classification is used.
+ * VCAP_AF_DSCP_VAL: W6, sparx5: is0
+ * See DSCP_ENA.
+ * VCAP_AF_ES2_REW_CMD: W3, sparx5: es2
+ * Command forwarded to REW: 0: No action. 1: SWAP MAC addresses. 2: Do L2CP
+ * DMAC translation when entering or leaving a tunnel.
+ * VCAP_AF_FWD_MODE: W2, sparx5: es2
+ * Forward selector: 0: Forward. 1: Discard. 2: Redirect. 3: Copy.
+ * VCAP_AF_HIT_ME_ONCE: W1, sparx5: is2/es2, lan966x: is2
+ * Setting this bit to 1 causes the first frame that hits this action where the
+ * HIT_CNT counter is zero to be copied to the CPU extraction queue specified in
+ * CPU_QUEUE_NUM. The HIT_CNT counter is then incremented and any frames that
+ * hit this action later are not copied to the CPU. To re-enable the HIT_ME_ONCE
+ * functionality, the HIT_CNT counter must be cleared.
+ * VCAP_AF_IGNORE_PIPELINE_CTRL: W1, sparx5: is2/es2
+ * Ignore ingress pipeline control. This enforces the use of the VCAP IS2 action
+ * even when the pipeline control has terminated the frame before VCAP IS2.
+ * VCAP_AF_INTR_ENA: W1, sparx5: is2/es2
+ * If set, an interrupt is triggered when this rule is hit
+ * VCAP_AF_ISDX_ADD_REPLACE_SEL: W1, sparx5: is0
+ * Controls the classified ISDX. 0: New ISDX = old ISDX + ISDX_VAL. 1: New ISDX
+ * = ISDX_VAL.
+ * VCAP_AF_ISDX_VAL: W12, sparx5: is0
+ * See isdx_add_replace_sel
+ * VCAP_AF_LRN_DIS: W1, sparx5: is2, lan966x: is2
+ * Setting this bit to 1 disables learning of frames hitting this action.
+ * VCAP_AF_MAP_IDX: W9, sparx5: is0
+ * Index for QoS mapping table lookup
+ * VCAP_AF_MAP_KEY: W3, sparx5: is0
+ * Key type for QoS mapping table lookup. 0: DEI0, PCP0 (outer tag). 1: DEI1,
+ * PCP1 (middle tag). 2: DEI2, PCP2 (inner tag). 3: MPLS TC. 4: PCP0 (outer
+ * tag). 5: E-DEI, E-PCP (E-TAG). 6: DSCP if available, otherwise none. 7: DSCP
+ * if available, otherwise DEI0, PCP0 (outer tag) if available using MAP_IDX+8,
+ * otherwise none
+ * VCAP_AF_MAP_LOOKUP_SEL: W2, sparx5: is0
+ * Selects which of the two QoS Mapping Table lookups that MAP_KEY and MAP_IDX
+ * are applied to. 0: No changes to the QoS Mapping Table lookup. 1: Update key
+ * type and index for QoS Mapping Table lookup #0. 2: Update key type and index
+ * for QoS Mapping Table lookup #1. 3: Reserved.
+ * VCAP_AF_MASK_MODE: W3, sparx5: is0/is2, lan966x is2 W2
+ * Controls the PORT_MASK use. Sparx5: 0: OR_DSTMASK, 1: AND_VLANMASK, 2:
+ * REPLACE_PGID, 3: REPLACE_ALL, 4: REDIR_PGID, 5: OR_PGID_MASK, 6: VSTAX, 7:
+ * Not applicable. LAN966X: 0: No action, 1: Permit/deny (AND), 2: Policy
+ * forwarding (DMAC lookup), 3: Redirect. The CPU port is untouched by
+ * MASK_MODE.
+ * VCAP_AF_MATCH_ID: W16, sparx5: is0/is2
+ * Logical ID for the entry. The MATCH_ID is extracted together with the frame
+ * if the frame is forwarded to the CPU (CPU_COPY_ENA). The result is placed in
+ * IFH.CL_RSLT.
+ * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is0/is2
+ * Mask used by MATCH_ID.
+ * VCAP_AF_MIRROR_PROBE: W2, sparx5: is2
+ * Mirroring performed according to configuration of a mirror probe. 0: No
+ * mirroring. 1: Mirror probe 0. 2: Mirror probe 1. 3: Mirror probe 2
+ * VCAP_AF_MIRROR_PROBE_ID: W2, sparx5: es2
+ * Signals a mirror probe to be placed in the IFH. Only possible when FWD_MODE
+ * is copy. 0: No mirroring. 1–3: Use mirror probe 0-2.
+ * VCAP_AF_NXT_IDX: W12, sparx5: is0
+ * Index used as part of key (field G_IDX) in the next lookup.
+ * VCAP_AF_NXT_IDX_CTRL: W3, sparx5: is0
+ * Controls the generation of the G_IDX used in the VCAP CLM next lookup
+ * VCAP_AF_PAG_OVERRIDE_MASK: W8, sparx5: is0
+ * Bits set in this mask will override PAG_VAL from port profile.  New PAG =
+ * (PAG (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK)
+ * VCAP_AF_PAG_VAL: W8, sparx5: is0
+ * See PAG_OVERRIDE_MASK.
+ * VCAP_AF_PCP_ENA: W1, sparx5: is0
+ * If set, use PCP_VAL as classified PCP value. Otherwise, PCP from basic
+ * classification is used.
+ * VCAP_AF_PCP_VAL: W3, sparx5: is0
+ * See PCP_ENA.
+ * VCAP_AF_PIPELINE_FORCE_ENA: sparx5 is0 W2, sparx5 is2 W1
+ * If set, use PIPELINE_PT unconditionally and set PIPELINE_ACT = NONE if
+ * PIPELINE_PT == NONE. Overrules previous settings of pipeline point.
+ * VCAP_AF_PIPELINE_PT: W5, sparx5: is0/is2
+ * Pipeline point used if PIPELINE_FORCE_ENA is set
+ * VCAP_AF_POLICE_ENA: W1, sparx5: is2/es2, lan966x: is2
+ * Setting this bit to 1 causes frames that hit this action to be policed by the
+ * ACL policer specified in POLICE_IDX. Only applies to the first lookup.
+ * VCAP_AF_POLICE_IDX: W6, sparx5: is2/es2, lan966x: is2 W9
+ * Selects VCAP policer used when policing frames (POLICE_ENA)
+ * VCAP_AF_POLICE_REMARK: W1, sparx5: es2
+ * If set, frames exceeding policer rates are marked as yellow but not
+ * discarded.
+ * VCAP_AF_PORT_MASK: sparx5 is0 W65, sparx5 is2 W68, lan966x is2 W8
+ * Port mask applied to the forwarding decision based on MASK_MODE.
+ * VCAP_AF_QOS_ENA: W1, sparx5: is0
+ * If set, use QOS_VAL as classified QoS class. Otherwise, QoS class from basic
+ * classification is used.
+ * VCAP_AF_QOS_VAL: W3, sparx5: is0
+ * See QOS_ENA.
+ * VCAP_AF_RT_DIS: W1, sparx5: is2
+ * If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also
+ * IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX.
+ * VCAP_AF_TYPE: W1, sparx5: is0
+ * Actionset type id - Set by the API
+ * VCAP_AF_VID_VAL: W13, sparx5: is0
+ * New VID Value
+ * VCAP_AF_MIRROR_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes frames to be mirrored to the mirror target
+ * port (ANA::MIRRORPORTS).
+ * VCAP_AF_POLICE_VCAP_ONLY: W1, lan966x: is2
+ * Disable policing from QoS, and port policers. Only the VCAP policer
+ * selected by POLICE_IDX is active. Only applies to the second lookup.
+ * VCAP_AF_REW_OP: W16, lan966x: is2
+ * Rewriter operation command.
+ * VCAP_AF_ISDX_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes the classified ISDX to be set to the value of
+ * POLICE_IDX[8:0].
+ * VCAP_AF_ACL_ID: W6, lan966x: is2
+ * Logical ID for the entry. This ID is extracted together with the frame in
+ * the CPU extraction header. Only applicable to actions with CPU_COPY_ENA or
+ * HIT_ME_ONCE set.
+ * VCAP_AF_FWD_KILL_ENA: W1, lan966x: is2
+ * Setting this bit to 1 denies forwarding of the frame to any front port.
+ * The frame can still be copied to the CPU by other actions.
+ * VCAP_AF_HOST_MATCH: W1, lan966x: is2
+ * Used for IP source guarding. If set, it signals that the host is valid
+ * (for instance a valid combination of source MAC address and source IP
+ * address). HOST_MATCH is input to the IS2 keys.
+ */
+
+/* Actionfield names */
+enum vcap_action_field {
+ VCAP_AF_NO_VALUE, /* initial value */
+ VCAP_AF_ACL_MAC,
+ VCAP_AF_ACL_RT_MODE,
+ VCAP_AF_CLS_VID_SEL,
+ VCAP_AF_CNT_ID,
+ VCAP_AF_COPY_PORT_NUM,
+ VCAP_AF_COPY_QUEUE_NUM,
+ VCAP_AF_COSID_ENA,
+ VCAP_AF_COSID_VAL,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_AF_CPU_DIS,
+ VCAP_AF_CPU_ENA,
+ VCAP_AF_CPU_Q,
+ VCAP_AF_CPU_QUEUE_NUM,
+ VCAP_AF_CUSTOM_ACE_ENA,
+ VCAP_AF_CUSTOM_ACE_OFFSET,
+ VCAP_AF_DEI_ENA,
+ VCAP_AF_DEI_VAL,
+ VCAP_AF_DLB_OFFSET,
+ VCAP_AF_DMAC_OFFSET_ENA,
+ VCAP_AF_DP_ENA,
+ VCAP_AF_DP_VAL,
+ VCAP_AF_DSCP_ENA,
+ VCAP_AF_DSCP_VAL,
+ VCAP_AF_EGR_ACL_ENA,
+ VCAP_AF_ES2_REW_CMD,
+ VCAP_AF_FWD_DIS,
+ VCAP_AF_FWD_MODE,
+ VCAP_AF_FWD_TYPE,
+ VCAP_AF_GVID_ADD_REPLACE_SEL,
+ VCAP_AF_HIT_ME_ONCE,
+ VCAP_AF_IGNORE_PIPELINE_CTRL,
+ VCAP_AF_IGR_ACL_ENA,
+ VCAP_AF_INJ_MASQ_ENA,
+ VCAP_AF_INJ_MASQ_LPORT,
+ VCAP_AF_INJ_MASQ_PORT,
+ VCAP_AF_INTR_ENA,
+ VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_AF_ISDX_VAL,
+ VCAP_AF_IS_INNER_ACL,
+ VCAP_AF_L3_MAC_UPDATE_DIS,
+ VCAP_AF_LOG_MSG_INTERVAL,
+ VCAP_AF_LPM_AFFIX_ENA,
+ VCAP_AF_LPM_AFFIX_VAL,
+ VCAP_AF_LPORT_ENA,
+ VCAP_AF_LRN_DIS,
+ VCAP_AF_MAP_IDX,
+ VCAP_AF_MAP_KEY,
+ VCAP_AF_MAP_LOOKUP_SEL,
+ VCAP_AF_MASK_MODE,
+ VCAP_AF_MATCH_ID,
+ VCAP_AF_MATCH_ID_MASK,
+ VCAP_AF_MIP_SEL,
+ VCAP_AF_MIRROR_PROBE,
+ VCAP_AF_MIRROR_PROBE_ID,
+ VCAP_AF_MPLS_IP_CTRL_ENA,
+ VCAP_AF_MPLS_MEP_ENA,
+ VCAP_AF_MPLS_MIP_ENA,
+ VCAP_AF_MPLS_OAM_FLAVOR,
+ VCAP_AF_MPLS_OAM_TYPE,
+ VCAP_AF_NUM_VLD_LABELS,
+ VCAP_AF_NXT_IDX,
+ VCAP_AF_NXT_IDX_CTRL,
+ VCAP_AF_NXT_KEY_TYPE,
+ VCAP_AF_NXT_NORMALIZE,
+ VCAP_AF_NXT_NORM_W16_OFFSET,
+ VCAP_AF_NXT_NORM_W32_OFFSET,
+ VCAP_AF_NXT_OFFSET_FROM_TYPE,
+ VCAP_AF_NXT_TYPE_AFTER_OFFSET,
+ VCAP_AF_OAM_IP_BFD_ENA,
+ VCAP_AF_OAM_TWAMP_ENA,
+ VCAP_AF_OAM_Y1731_SEL,
+ VCAP_AF_PAG_OVERRIDE_MASK,
+ VCAP_AF_PAG_VAL,
+ VCAP_AF_PCP_ENA,
+ VCAP_AF_PCP_VAL,
+ VCAP_AF_PIPELINE_ACT_SEL,
+ VCAP_AF_PIPELINE_FORCE_ENA,
+ VCAP_AF_PIPELINE_PT,
+ VCAP_AF_PIPELINE_PT_REDUCED,
+ VCAP_AF_POLICE_ENA,
+ VCAP_AF_POLICE_IDX,
+ VCAP_AF_POLICE_REMARK,
+ VCAP_AF_PORT_MASK,
+ VCAP_AF_PTP_MASTER_SEL,
+ VCAP_AF_QOS_ENA,
+ VCAP_AF_QOS_VAL,
+ VCAP_AF_REW_CMD,
+ VCAP_AF_RLEG_DMAC_CHK_DIS,
+ VCAP_AF_RLEG_STAT_IDX,
+ VCAP_AF_RSDX_ENA,
+ VCAP_AF_RSDX_VAL,
+ VCAP_AF_RSVD_LBL_VAL,
+ VCAP_AF_RT_DIS,
+ VCAP_AF_RT_SEL,
+ VCAP_AF_S2_KEY_SEL_ENA,
+ VCAP_AF_S2_KEY_SEL_IDX,
+ VCAP_AF_SAM_SEQ_ENA,
+ VCAP_AF_SIP_IDX,
+ VCAP_AF_SWAP_MAC_ENA,
+ VCAP_AF_TCP_UDP_DPORT,
+ VCAP_AF_TCP_UDP_ENA,
+ VCAP_AF_TCP_UDP_SPORT,
+ VCAP_AF_TC_ENA,
+ VCAP_AF_TC_LABEL,
+ VCAP_AF_TPID_SEL,
+ VCAP_AF_TTL_DECR_DIS,
+ VCAP_AF_TTL_ENA,
+ VCAP_AF_TTL_LABEL,
+ VCAP_AF_TTL_UPDATE_ENA,
+ VCAP_AF_TYPE,
+ VCAP_AF_VID_VAL,
+ VCAP_AF_VLAN_POP_CNT,
+ VCAP_AF_VLAN_POP_CNT_ENA,
+ VCAP_AF_VLAN_PUSH_CNT,
+ VCAP_AF_VLAN_PUSH_CNT_ENA,
+ VCAP_AF_VLAN_WAS_TAGGED,
+ VCAP_AF_MIRROR_ENA,
+ VCAP_AF_POLICE_VCAP_ONLY,
+ VCAP_AF_REW_OP,
+ VCAP_AF_ISDX_ENA,
+ VCAP_AF_ACL_ID,
+ VCAP_AF_FWD_KILL_ENA,
+ VCAP_AF_HOST_MATCH,
+};
+
+#endif /* __VCAP_AG_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
new file mode 100644
index 000000000000..664aae3e2acd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -0,0 +1,2883 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/types.h>
+
+#include "vcap_api_private.h"
+
+static int keyfield_size_table[] = {
+ [VCAP_FIELD_BIT] = sizeof(struct vcap_u1_key),
+ [VCAP_FIELD_U32] = sizeof(struct vcap_u32_key),
+ [VCAP_FIELD_U48] = sizeof(struct vcap_u48_key),
+ [VCAP_FIELD_U56] = sizeof(struct vcap_u56_key),
+ [VCAP_FIELD_U64] = sizeof(struct vcap_u64_key),
+ [VCAP_FIELD_U72] = sizeof(struct vcap_u72_key),
+ [VCAP_FIELD_U112] = sizeof(struct vcap_u112_key),
+ [VCAP_FIELD_U128] = sizeof(struct vcap_u128_key),
+};
+
+static int actionfield_size_table[] = {
+ [VCAP_FIELD_BIT] = sizeof(struct vcap_u1_action),
+ [VCAP_FIELD_U32] = sizeof(struct vcap_u32_action),
+ [VCAP_FIELD_U48] = sizeof(struct vcap_u48_action),
+ [VCAP_FIELD_U56] = sizeof(struct vcap_u56_action),
+ [VCAP_FIELD_U64] = sizeof(struct vcap_u64_action),
+ [VCAP_FIELD_U72] = sizeof(struct vcap_u72_action),
+ [VCAP_FIELD_U112] = sizeof(struct vcap_u112_action),
+ [VCAP_FIELD_U128] = sizeof(struct vcap_u128_action),
+};
+
+/* Moving a rule in the VCAP address space */
+struct vcap_rule_move {
+ int addr; /* address to move */
+ int offset; /* change in address */
+ int count; /* blocksize of addresses to move */
+};
+
+/* Stores the filter cookie that enabled the port */
+struct vcap_enabled_port {
+ struct list_head list; /* for insertion in enabled ports list */
+ struct net_device *ndev; /* the enabled port */
+ unsigned long cookie; /* filter that enabled the port */
+};
+
+void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset)
+{
+ memset(itr, 0, sizeof(*itr));
+ itr->offset = offset;
+ itr->sw_width = sw_width;
+ itr->regs_per_sw = DIV_ROUND_UP(sw_width, 32);
+ itr->tg = tg;
+}
+
+static void vcap_iter_skip_tg(struct vcap_stream_iter *itr)
+{
+ /* Compensate the field offset for preceding typegroups.
+ * A typegroup table ends with an all-zero terminator.
+ */
+ while (itr->tg->width && itr->offset >= itr->tg->offset) {
+ itr->offset += itr->tg->width;
+ itr->tg++; /* next typegroup */
+ }
+}
+
+void vcap_iter_update(struct vcap_stream_iter *itr)
+{
+ int sw_idx, sw_bitpos;
+
+ /* Calculate the subword index and bitposition for current bit */
+ sw_idx = itr->offset / itr->sw_width;
+ sw_bitpos = itr->offset % itr->sw_width;
+ /* Calculate the register index and bitposition for current bit */
+ itr->reg_idx = (sw_idx * itr->regs_per_sw) + (sw_bitpos / 32);
+ itr->reg_bitpos = sw_bitpos % 32;
+}
+
+void vcap_iter_init(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset)
+{
+ vcap_iter_set(itr, sw_width, tg, offset);
+ vcap_iter_skip_tg(itr);
+ vcap_iter_update(itr);
+}
+
+void vcap_iter_next(struct vcap_stream_iter *itr)
+{
+ itr->offset++;
+ vcap_iter_skip_tg(itr);
+ vcap_iter_update(itr);
+}
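(Editorial worked example, not part of the patch; 52 bits is only an example
subword width.) The iterator maps a key bit offset to a register index and bit
position in two steps: first into a subword, then into one of the 32-bit
registers backing that subword. For offset 60 and a 52-bit subword,
sw_idx = 60 / 52 = 1 and sw_bitpos = 60 % 52 = 8; with
regs_per_sw = DIV_ROUND_UP(52, 32) = 2 this lands in register 2, bit 8:

/* Positioning only: vcap_iter_init() is skipped so no typegroup table is needed */
static void example_iter_position(void)
{
	struct vcap_stream_iter itr;

	vcap_iter_set(&itr, 52, NULL, 60);
	vcap_iter_update(&itr);
	/* itr.reg_idx == 2, itr.reg_bitpos == 8 */
}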
+
+static void vcap_set_bit(u32 *stream, struct vcap_stream_iter *itr, bool value)
+{
+ u32 mask = BIT(itr->reg_bitpos);
+ u32 *p = &stream[itr->reg_idx];
+
+ if (value)
+ *p |= mask;
+ else
+ *p &= ~mask;
+}
+
+static void vcap_encode_bit(u32 *stream, struct vcap_stream_iter *itr, bool val)
+{
+ /* When intersected by a type group field, stream the type group bits
+ * before continuing with the value bit
+ */
+ while (itr->tg->width &&
+ itr->offset >= itr->tg->offset &&
+ itr->offset < itr->tg->offset + itr->tg->width) {
+ int tg_bitpos = itr->tg->offset - itr->offset;
+
+ vcap_set_bit(stream, itr, (itr->tg->value >> tg_bitpos) & 0x1);
+ itr->offset++;
+ vcap_iter_update(itr);
+ }
+ vcap_set_bit(stream, itr, val);
+}
+
+static void vcap_encode_field(u32 *stream, struct vcap_stream_iter *itr,
+ int width, const u8 *value)
+{
+ int idx;
+
+ /* Loop over the field value bits and add the value bits one by one to
+ * the output stream.
+ */
+ for (idx = 0; idx < width; idx++) {
+ u8 bidx = idx & GENMASK(2, 0);
+
+ /* Encode one field value bit */
+ vcap_encode_bit(stream, itr, (value[idx / 8] >> bidx) & 0x1);
+ vcap_iter_next(itr);
+ }
+}
+
+static void vcap_encode_typegroups(u32 *stream, int sw_width,
+ const struct vcap_typegroup *tg,
+ bool mask)
+{
+ struct vcap_stream_iter iter;
+ int idx;
+
+ /* Mask bits must be set to zeros (inverted later when writing to the
+ * mask cache register), so that the mask typegroup bits consist of
+ * match-1 or match-0, or both
+ */
+ vcap_iter_set(&iter, sw_width, tg, 0);
+ while (iter.tg->width) {
+ /* Set position to current typegroup bit */
+ iter.offset = iter.tg->offset;
+ vcap_iter_update(&iter);
+ for (idx = 0; idx < iter.tg->width; idx++) {
+ /* Iterate over current typegroup bits. Mask typegroup
+ * bits are always set
+ */
+ if (mask)
+ vcap_set_bit(stream, &iter, 0x1);
+ else
+ vcap_set_bit(stream, &iter,
+ (iter.tg->value >> idx) & 0x1);
+ iter.offset++;
+ vcap_iter_update(&iter);
+ }
+ iter.tg++; /* next typegroup */
+ }
+}
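(Editorial, hypothetical example; the offsets and values below are invented
for illustration and are not taken from the Sparx5 VCAP model.) A typegroup
table simply lists marker bits at fixed stream offsets, terminated by an
all-zero entry, and the iterator skips those positions when field bits are
streamed. A two-subword keyset with a 52-bit subword might look like this:

/* Hypothetical typegroup table for an X2 keyset with a 52-bit subword */
static const struct vcap_typegroup example_x2_keyfield_tg[] = {
	{ .offset = 0, .width = 2, .value = 2, },	/* start of subword 0 */
	{ .offset = 52, .width = 1, .value = 0, },	/* start of subword 1 */
	{}						/* all-zero terminator */
};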
+
+static bool vcap_bitarray_zero(int width, u8 *value)
+{
+ int bytes = DIV_ROUND_UP(width, BITS_PER_BYTE);
+ u8 total = 0, bmask = 0xff;
+ int rwidth = width;
+ int idx;
+
+ for (idx = 0; idx < bytes; ++idx, rwidth -= BITS_PER_BYTE) {
+ if (rwidth && rwidth < BITS_PER_BYTE)
+ bmask = (1 << rwidth) - 1;
+ total += value[idx] & bmask;
+ }
+ return total == 0;
+}
+
+static bool vcap_get_bit(u32 *stream, struct vcap_stream_iter *itr)
+{
+ u32 mask = BIT(itr->reg_bitpos);
+ u32 *p = &stream[itr->reg_idx];
+
+ return !!(*p & mask);
+}
+
+static void vcap_decode_field(u32 *stream, struct vcap_stream_iter *itr,
+ int width, u8 *value)
+{
+ int idx;
+
+ /* Loop over the field value bits and get the field bits and
+ * set them in the output value byte array
+ */
+ for (idx = 0; idx < width; idx++) {
+ u8 bidx = idx & 0x7;
+
+ /* Decode one field value bit */
+ if (vcap_get_bit(stream, itr))
+ *value |= 1 << bidx;
+ vcap_iter_next(itr);
+ if (bidx == 7)
+ value++;
+ }
+}
+
+/* Verify that the type id in the stream matches the type id of the keyset */
+static bool vcap_verify_keystream_keyset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *keystream,
+ u32 *mskstream,
+ enum vcap_keyfield_set keyset)
+{
+ const struct vcap_info *vcap = &vctrl->vcaps[vt];
+ const struct vcap_field *typefld;
+ const struct vcap_typegroup *tgt;
+ const struct vcap_field *fields;
+ struct vcap_stream_iter iter;
+ const struct vcap_set *info;
+ u32 value = 0;
+ u32 mask = 0;
+
+ if (vcap_keyfield_count(vctrl, vt, keyset) == 0)
+ return false;
+
+ info = vcap_keyfieldset(vctrl, vt, keyset);
+ /* Check that the keyset is valid */
+ if (!info)
+ return false;
+
+ /* a type_id of value -1 means that there is no type field */
+ if (info->type_id == (u8)-1)
+ return true;
+
+ /* Get a valid typegroup for the specific keyset */
+ tgt = vcap_keyfield_typegroup(vctrl, vt, keyset);
+ if (!tgt)
+ return false;
+
+ fields = vcap_keyfields(vctrl, vt, keyset);
+ if (!fields)
+ return false;
+
+ typefld = &fields[VCAP_KF_TYPE];
+ vcap_iter_init(&iter, vcap->sw_width, tgt, typefld->offset);
+ vcap_decode_field(mskstream, &iter, typefld->width, (u8 *)&mask);
+ /* no type info if there are no mask bits */
+ if (vcap_bitarray_zero(typefld->width, (u8 *)&mask))
+ return false;
+
+ /* Get the value of the type field in the stream and compare to the
+	 * one defined in the vcap keyset
+ */
+ vcap_iter_init(&iter, vcap->sw_width, tgt, typefld->offset);
+ vcap_decode_field(keystream, &iter, typefld->width, (u8 *)&value);
+
+ return (value & mask) == (info->type_id & mask);
+}
+
+/* Verify that the typegroup bits have the correct values */
+static int vcap_verify_typegroups(u32 *stream, int sw_width,
+ const struct vcap_typegroup *tgt, bool mask,
+ int sw_max)
+{
+ struct vcap_stream_iter iter;
+ int sw_cnt, idx;
+
+ vcap_iter_set(&iter, sw_width, tgt, 0);
+ sw_cnt = 0;
+ while (iter.tg->width) {
+ u32 value = 0;
+ u32 tg_value = iter.tg->value;
+
+ if (mask)
+ tg_value = (1 << iter.tg->width) - 1;
+ /* Set position to current typegroup bit */
+ iter.offset = iter.tg->offset;
+ vcap_iter_update(&iter);
+ for (idx = 0; idx < iter.tg->width; idx++) {
+ /* Decode one typegroup bit */
+ if (vcap_get_bit(stream, &iter))
+ value |= 1 << idx;
+ iter.offset++;
+ vcap_iter_update(&iter);
+ }
+ if (value != tg_value)
+ return -EINVAL;
+ iter.tg++; /* next typegroup */
+ sw_cnt++;
+ /* Stop checking more typegroups */
+ if (sw_max && sw_cnt >= sw_max)
+ break;
+ }
+ return 0;
+}
+
+/* Find the subword width of the key typegroup that matches the stream data */
+static int vcap_find_keystream_typegroup_sw(struct vcap_control *vctrl,
+ enum vcap_type vt, u32 *stream,
+ bool mask, int sw_max)
+{
+ const struct vcap_typegroup **tgt;
+ int sw_idx, res;
+
+ tgt = vctrl->vcaps[vt].keyfield_set_typegroups;
+ /* Try the longest subword match first */
+ for (sw_idx = vctrl->vcaps[vt].sw_count; sw_idx >= 0; sw_idx--) {
+ if (!tgt[sw_idx])
+ continue;
+
+ res = vcap_verify_typegroups(stream, vctrl->vcaps[vt].sw_width,
+ tgt[sw_idx], mask, sw_max);
+ if (res == 0)
+ return sw_idx;
+ }
+ return -EINVAL;
+}
+
+/* Verify that the typegroup information, subword count, keyset and type id
+ * are in sync and correct, return the list of matching keysets
+ */
+int
+vcap_find_keystream_keysets(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *keystream,
+ u32 *mskstream,
+ bool mask, int sw_max,
+ struct vcap_keyset_list *kslist)
+{
+ const struct vcap_set *keyfield_set;
+ int sw_count, idx;
+
+ sw_count = vcap_find_keystream_typegroup_sw(vctrl, vt, keystream, mask,
+ sw_max);
+ if (sw_count < 0)
+ return sw_count;
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item != sw_count)
+ continue;
+
+ if (vcap_verify_keystream_keyset(vctrl, vt, keystream,
+ mskstream, idx))
+ vcap_keyset_list_add(kslist, idx);
+ }
+ if (kslist->cnt > 0)
+ return 0;
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vcap_find_keystream_keysets);
+
+/* Read key data from a VCAP address and discover if there are any rule keysets
+ * here
+ */
+int vcap_addr_keysets(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ struct vcap_admin *admin,
+ int addr,
+ struct vcap_keyset_list *kslist)
+{
+ enum vcap_type vt = admin->vtype;
+ int keyset_sw_regs, idx;
+ u32 key = 0, mask = 0;
+
+ /* Read the cache at the specified address */
+ keyset_sw_regs = DIV_ROUND_UP(vctrl->vcaps[vt].sw_width, 32);
+ vctrl->ops->update(ndev, admin, VCAP_CMD_READ, VCAP_SEL_ALL, addr);
+ vctrl->ops->cache_read(ndev, admin, VCAP_SEL_ENTRY, 0,
+ keyset_sw_regs);
+ /* Skip uninitialized key/mask entries */
+ for (idx = 0; idx < keyset_sw_regs; ++idx) {
+ key |= ~admin->cache.keystream[idx];
+ mask |= admin->cache.maskstream[idx];
+ }
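+ /* An unused entry reads back with all key bits set and all mask bits cleared */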
+ if (key == 0 && mask == 0)
+ return -EINVAL;
+ /* Decode and locate the keysets */
+ return vcap_find_keystream_keysets(vctrl, vt, admin->cache.keystream,
+ admin->cache.maskstream, false, 0,
+ kslist);
+}
+EXPORT_SYMBOL_GPL(vcap_addr_keysets);
+
+/* Return the list of keyfields for the keyset */
+const struct vcap_field *vcap_keyfields(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset)
+{
+ /* Check that the keyset exists in the vcap keyset list */
+ if (keyset >= vctrl->vcaps[vt].keyfield_set_size)
+ return NULL;
+ return vctrl->vcaps[vt].keyfield_set_map[keyset];
+}
+
+/* Return the keyset information for the keyset */
+const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset)
+{
+ const struct vcap_set *kset;
+
+ /* Check that the keyset exists in the vcap keyset list */
+ if (keyset >= vctrl->vcaps[vt].keyfield_set_size)
+ return NULL;
+ kset = &vctrl->vcaps[vt].keyfield_set[keyset];
+ if (kset->sw_per_item == 0 || kset->sw_per_item > vctrl->vcaps[vt].sw_count)
+ return NULL;
+ return kset;
+}
+EXPORT_SYMBOL_GPL(vcap_keyfieldset);
+
+/* Return the typegroup table for the matching keyset (using subword size) */
+const struct vcap_typegroup *
+vcap_keyfield_typegroup(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset)
+{
+ const struct vcap_set *kset = vcap_keyfieldset(vctrl, vt, keyset);
+
+ /* Check that the keyset is valid */
+ if (!kset)
+ return NULL;
+ return vctrl->vcaps[vt].keyfield_set_typegroups[kset->sw_per_item];
+}
+
+/* Return the number of keyfields in the keyset */
+int vcap_keyfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset)
+{
+ /* Check that the keyset exists in the vcap keyset list */
+ if (keyset >= vctrl->vcaps[vt].keyfield_set_size)
+ return 0;
+ return vctrl->vcaps[vt].keyfield_set_map_size[keyset];
+}
+
+static void vcap_encode_keyfield(struct vcap_rule_internal *ri,
+ const struct vcap_client_keyfield *kf,
+ const struct vcap_field *rf,
+ const struct vcap_typegroup *tgt)
+{
+ int sw_width = ri->vctrl->vcaps[ri->admin->vtype].sw_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+ struct vcap_stream_iter iter;
+ const u8 *value, *mask;
+
+ /* Encode the fields for the key and the mask in their respective
+ * streams, respecting the subword width.
+ */
+ switch (kf->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ value = &kf->data.u1.value;
+ mask = &kf->data.u1.mask;
+ break;
+ case VCAP_FIELD_U32:
+ value = (const u8 *)&kf->data.u32.value;
+ mask = (const u8 *)&kf->data.u32.mask;
+ break;
+ case VCAP_FIELD_U48:
+ value = kf->data.u48.value;
+ mask = kf->data.u48.mask;
+ break;
+ case VCAP_FIELD_U56:
+ value = kf->data.u56.value;
+ mask = kf->data.u56.mask;
+ break;
+ case VCAP_FIELD_U64:
+ value = kf->data.u64.value;
+ mask = kf->data.u64.mask;
+ break;
+ case VCAP_FIELD_U72:
+ value = kf->data.u72.value;
+ mask = kf->data.u72.mask;
+ break;
+ case VCAP_FIELD_U112:
+ value = kf->data.u112.value;
+ mask = kf->data.u112.mask;
+ break;
+ case VCAP_FIELD_U128:
+ value = kf->data.u128.value;
+ mask = kf->data.u128.mask;
+ break;
+ }
+ vcap_iter_init(&iter, sw_width, tgt, rf->offset);
+ vcap_encode_field(cache->keystream, &iter, rf->width, value);
+ vcap_iter_init(&iter, sw_width, tgt, rf->offset);
+ vcap_encode_field(cache->maskstream, &iter, rf->width, mask);
+}
+
+static void vcap_encode_keyfield_typegroups(struct vcap_control *vctrl,
+ struct vcap_rule_internal *ri,
+ const struct vcap_typegroup *tgt)
+{
+ int sw_width = vctrl->vcaps[ri->admin->vtype].sw_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+
+ /* Encode the typegroup bits for the key and the mask in their streams,
+ * respecting the subword width.
+ */
+ vcap_encode_typegroups(cache->keystream, sw_width, tgt, false);
+ vcap_encode_typegroups(cache->maskstream, sw_width, tgt, true);
+}
+
+static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri)
+{
+ const struct vcap_client_keyfield *ckf;
+ const struct vcap_typegroup *tg_table;
+ const struct vcap_field *kf_table;
+ int keyset_size;
+
+ /* Get a valid set of fields for the specific keyset */
+ kf_table = vcap_keyfields(ri->vctrl, ri->admin->vtype, ri->data.keyset);
+ if (!kf_table) {
+ pr_err("%s:%d: no fields available for this keyset: %d\n",
+ __func__, __LINE__, ri->data.keyset);
+ return -EINVAL;
+ }
+ /* Get a valid typegroup for the specific keyset */
+ tg_table = vcap_keyfield_typegroup(ri->vctrl, ri->admin->vtype,
+ ri->data.keyset);
+ if (!tg_table) {
+ pr_err("%s:%d: no typegroups available for this keyset: %d\n",
+ __func__, __LINE__, ri->data.keyset);
+ return -EINVAL;
+ }
+ /* Get a valid size for the specific keyset */
+ keyset_size = vcap_keyfield_count(ri->vctrl, ri->admin->vtype,
+ ri->data.keyset);
+ if (keyset_size == 0) {
+ pr_err("%s:%d: zero field count for this keyset: %d\n",
+ __func__, __LINE__, ri->data.keyset);
+ return -EINVAL;
+ }
+ /* Iterate over the keyfields (key, mask) in the rule
+ * and encode these bits
+ */
+ if (list_empty(&ri->data.keyfields)) {
+ pr_err("%s:%d: no keyfields in the rule\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ /* Check that the client entry exists in the keyset */
+ if (ckf->ctrl.key >= keyset_size) {
+ pr_err("%s:%d: key %d is not in vcap\n",
+ __func__, __LINE__, ckf->ctrl.key);
+ return -EINVAL;
+ }
+ vcap_encode_keyfield(ri, ckf, &kf_table[ckf->ctrl.key], tg_table);
+ }
+ /* Add typegroup bits to the key/mask bitstreams */
+ vcap_encode_keyfield_typegroups(ri->vctrl, ri, tg_table);
+ return 0;
+}
+
+/* Return the list of actionfields for the actionset */
+const struct vcap_field *
+vcap_actionfields(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset)
+{
+ /* Check that the actionset exists in the vcap actionset list */
+ if (actionset >= vctrl->vcaps[vt].actionfield_set_size)
+ return NULL;
+ return vctrl->vcaps[vt].actionfield_set_map[actionset];
+}
+
+const struct vcap_set *
+vcap_actionfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset)
+{
+ const struct vcap_set *aset;
+
+ /* Check that the actionset exists in the vcap actionset list */
+ if (actionset >= vctrl->vcaps[vt].actionfield_set_size)
+ return NULL;
+ aset = &vctrl->vcaps[vt].actionfield_set[actionset];
+ if (aset->sw_per_item == 0 || aset->sw_per_item > vctrl->vcaps[vt].sw_count)
+ return NULL;
+ return aset;
+}
+
+/* Return the typegroup table for the matching actionset (using subword size) */
+const struct vcap_typegroup *
+vcap_actionfield_typegroup(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset)
+{
+ const struct vcap_set *aset = vcap_actionfieldset(vctrl, vt, actionset);
+
+ /* Check that the actionset is valid */
+ if (!aset)
+ return NULL;
+ return vctrl->vcaps[vt].actionfield_set_typegroups[aset->sw_per_item];
+}
+
+/* Return the number of actionfields in the actionset */
+int vcap_actionfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_actionfield_set actionset)
+{
+ /* Check that the actionset exists in the vcap actionset list */
+ if (actionset >= vctrl->vcaps[vt].actionfield_set_size)
+ return 0;
+ return vctrl->vcaps[vt].actionfield_set_map_size[actionset];
+}
+
+static void vcap_encode_actionfield(struct vcap_rule_internal *ri,
+ const struct vcap_client_actionfield *af,
+ const struct vcap_field *rf,
+ const struct vcap_typegroup *tgt)
+{
+ int act_width = ri->vctrl->vcaps[ri->admin->vtype].act_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+ struct vcap_stream_iter iter;
+ const u8 *value;
+
+ /* Encode the action field in the stream, respecting the subword width */
+ switch (af->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ value = &af->data.u1.value;
+ break;
+ case VCAP_FIELD_U32:
+ value = (const u8 *)&af->data.u32.value;
+ break;
+ case VCAP_FIELD_U48:
+ value = af->data.u48.value;
+ break;
+ case VCAP_FIELD_U56:
+ value = af->data.u56.value;
+ break;
+ case VCAP_FIELD_U64:
+ value = af->data.u64.value;
+ break;
+ case VCAP_FIELD_U72:
+ value = af->data.u72.value;
+ break;
+ case VCAP_FIELD_U112:
+ value = af->data.u112.value;
+ break;
+ case VCAP_FIELD_U128:
+ value = af->data.u128.value;
+ break;
+ }
+ vcap_iter_init(&iter, act_width, tgt, rf->offset);
+ vcap_encode_field(cache->actionstream, &iter, rf->width, value);
+}
+
+static void vcap_encode_actionfield_typegroups(struct vcap_rule_internal *ri,
+ const struct vcap_typegroup *tgt)
+{
+ int sw_width = ri->vctrl->vcaps[ri->admin->vtype].act_width;
+ struct vcap_cache_data *cache = &ri->admin->cache;
+
+ /* Encode the typegroup bits for the actionstream respecting the subword
+ * width.
+ */
+ vcap_encode_typegroups(cache->actionstream, sw_width, tgt, false);
+}
+
+static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri)
+{
+ const struct vcap_client_actionfield *caf;
+ const struct vcap_typegroup *tg_table;
+ const struct vcap_field *af_table;
+ int actionset_size;
+
+ /* Get a valid set of actionset fields for the specific actionset */
+ af_table = vcap_actionfields(ri->vctrl, ri->admin->vtype,
+ ri->data.actionset);
+ if (!af_table) {
+ pr_err("%s:%d: no fields available for this actionset: %d\n",
+ __func__, __LINE__, ri->data.actionset);
+ return -EINVAL;
+ }
+ /* Get a valid typegroup for the specific actionset */
+ tg_table = vcap_actionfield_typegroup(ri->vctrl, ri->admin->vtype,
+ ri->data.actionset);
+ if (!tg_table) {
+ pr_err("%s:%d: no typegroups available for this actionset: %d\n",
+ __func__, __LINE__, ri->data.actionset);
+ return -EINVAL;
+ }
+ /* Get a valid actionset size for the specific actionset */
+ actionset_size = vcap_actionfield_count(ri->vctrl, ri->admin->vtype,
+ ri->data.actionset);
+ if (actionset_size == 0) {
+ pr_err("%s:%d: zero field count for this actionset: %d\n",
+ __func__, __LINE__, ri->data.actionset);
+ return -EINVAL;
+ }
+ /* Iterate over the actionfields in the rule
+ * and encode these bits
+ */
+ if (list_empty(&ri->data.actionfields))
+ pr_warn("%s:%d: no actionfields in the rule\n",
+ __func__, __LINE__);
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ /* Check that the client action exists in the actionset */
+ if (caf->ctrl.action >= actionset_size) {
+ pr_err("%s:%d: action %d is not in vcap\n",
+ __func__, __LINE__, caf->ctrl.action);
+ return -EINVAL;
+ }
+ vcap_encode_actionfield(ri, caf, &af_table[caf->ctrl.action],
+ tg_table);
+ }
+ /* Add typegroup bits to the entry bitstreams */
+ vcap_encode_actionfield_typegroups(ri, tg_table);
+ return 0;
+}
+
+static int vcap_encode_rule(struct vcap_rule_internal *ri)
+{
+ int err;
+
+ err = vcap_encode_rule_keyset(ri);
+ if (err)
+ return err;
+ err = vcap_encode_rule_actionset(ri);
+ if (err)
+ return err;
+ return 0;
+}
+
+int vcap_api_check(struct vcap_control *ctrl)
+{
+ if (!ctrl) {
+ pr_err("%s:%d: vcap control is missing\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!ctrl->ops || !ctrl->ops->validate_keyset ||
+ !ctrl->ops->add_default_fields || !ctrl->ops->cache_erase ||
+ !ctrl->ops->cache_write || !ctrl->ops->cache_read ||
+ !ctrl->ops->init || !ctrl->ops->update || !ctrl->ops->move ||
+ !ctrl->ops->port_info || !ctrl->ops->enable) {
+ pr_err("%s:%d: client operations are missing\n",
+ __func__, __LINE__);
+ return -ENOENT;
+ }
+ return 0;
+}
+
+void vcap_erase_cache(struct vcap_rule_internal *ri)
+{
+ ri->vctrl->ops->cache_erase(ri->admin);
+}
+
+/* Update the keyset for the rule */
+int vcap_set_rule_set_keyset(struct vcap_rule *rule,
+ enum vcap_keyfield_set keyset)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_set *kset;
+ int sw_width;
+
+ kset = vcap_keyfieldset(ri->vctrl, ri->admin->vtype, keyset);
+ /* Check that the keyset is valid */
+ if (!kset)
+ return -EINVAL;
+ ri->keyset_sw = kset->sw_per_item;
+ sw_width = ri->vctrl->vcaps[ri->admin->vtype].sw_width;
+ ri->keyset_sw_regs = DIV_ROUND_UP(sw_width, 32);
+ ri->data.keyset = keyset;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_set_rule_set_keyset);
+
+/* Update the actionset for the rule */
+int vcap_set_rule_set_actionset(struct vcap_rule *rule,
+ enum vcap_actionfield_set actionset)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_set *aset;
+ int act_width;
+
+ aset = vcap_actionfieldset(ri->vctrl, ri->admin->vtype, actionset);
+ /* Check that the actionset is valid */
+ if (!aset)
+ return -EINVAL;
+ ri->actionset_sw = aset->sw_per_item;
+ act_width = ri->vctrl->vcaps[ri->admin->vtype].act_width;
+ ri->actionset_sw_regs = DIV_ROUND_UP(act_width, 32);
+ ri->data.actionset = actionset;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_set_rule_set_actionset);
+
+/* Find a rule with a provided rule id */
+static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl,
+ u32 id)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+
+ /* Look for the rule id in all vcaps */
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(ri, &admin->rules, list)
+ if (ri->data.id == id)
+ return ri;
+ return NULL;
+}
+
+/* Find a rule id with a provided cookie */
+int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+
+ /* Look for the rule id in all vcaps */
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(ri, &admin->rules, list)
+ if (ri->data.cookie == cookie)
+ return ri->data.id;
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(vcap_lookup_rule_by_cookie);
+
+/* Make a shallow copy of the rule without the fields */
+struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_rule_internal *duprule;
+
+ /* Allocate the client part */
+ duprule = kzalloc(sizeof(*duprule), GFP_KERNEL);
+ if (!duprule)
+ return ERR_PTR(-ENOMEM);
+ *duprule = *ri;
+ /* Not inserted in the VCAP */
+ INIT_LIST_HEAD(&duprule->list);
+ /* No elements in these lists */
+ INIT_LIST_HEAD(&duprule->data.keyfields);
+ INIT_LIST_HEAD(&duprule->data.actionfields);
+ return duprule;
+}
+
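+/* Zero out all bits in dst above the field width; the width is given in bits
+ * and dst holds 'bytes' bytes
+ */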
+static void vcap_apply_width(u8 *dst, int width, int bytes)
+{
+ u8 bmask;
+ int idx;
+
+ for (idx = 0; idx < bytes; idx++) {
+ if (width > 0) {
+ if (width < 8)
+ bmask = (1 << width) - 1;
+ else
+ bmask = ~0;
+ } else {
+ bmask = 0;
+ }
+ dst[idx] &= bmask;
+ width -= 8;
+ }
+}
+
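+/* Copy a field from the 32-bit big-endian word layout used by the VCAP cache
+ * back to the client byte order, and mask the result down to the field width
+ */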
+static void vcap_copy_from_w32be(u8 *dst, u8 *src, int size, int width)
+{
+ int idx, ridx, wstart, nidx;
+ int tail_bytes = (((size + 4) >> 2) << 2) - size;
+
+ for (idx = 0, ridx = size - 1; idx < size; ++idx, --ridx) {
+ wstart = (idx >> 2) << 2;
+ nidx = wstart + 3 - (idx & 0x3);
+ if (nidx >= size)
+ nidx -= tail_bytes;
+ dst[nidx] = src[ridx];
+ }
+
+ vcap_apply_width(dst, width, size);
+}
+
+static void vcap_copy_action_bit_field(struct vcap_u1_action *field, u8 *value)
+{
+ field->value = (*value) & 0x1;
+}
+
+static void vcap_copy_limited_actionfield(u8 *dstvalue, u8 *srcvalue,
+ int width, int bytes)
+{
+ memcpy(dstvalue, srcvalue, bytes);
+ vcap_apply_width(dstvalue, width, bytes);
+}
+
+static void vcap_copy_to_client_actionfield(struct vcap_rule_internal *ri,
+ struct vcap_client_actionfield *field,
+ u8 *value, u16 width)
+{
+ int field_size = actionfield_size_table[field->ctrl.type];
+
+ if (ri->admin->w32be) {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_action_bit_field(&field->data.u1, value);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_actionfield((u8 *)&field->data.u32.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_from_w32be(field->data.u48.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_from_w32be(field->data.u56.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_from_w32be(field->data.u64.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_from_w32be(field->data.u72.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_from_w32be(field->data.u112.value, value,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_from_w32be(field->data.u128.value, value,
+ field_size, width);
+ break;
+ }
+ } else {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_action_bit_field(&field->data.u1, value);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_actionfield((u8 *)&field->data.u32.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_limited_actionfield(field->data.u48.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_limited_actionfield(field->data.u56.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_limited_actionfield(field->data.u64.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_limited_actionfield(field->data.u72.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_limited_actionfield(field->data.u112.value,
+ value,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_limited_actionfield(field->data.u128.value,
+ value,
+ width, field_size);
+ break;
+ }
+ }
+}
+
+static void vcap_copy_key_bit_field(struct vcap_u1_key *field,
+ u8 *value, u8 *mask)
+{
+ field->value = (*value) & 0x1;
+ field->mask = (*mask) & 0x1;
+}
+
+static void vcap_copy_limited_keyfield(u8 *dstvalue, u8 *dstmask,
+ u8 *srcvalue, u8 *srcmask,
+ int width, int bytes)
+{
+ memcpy(dstvalue, srcvalue, bytes);
+ vcap_apply_width(dstvalue, width, bytes);
+ memcpy(dstmask, srcmask, bytes);
+ vcap_apply_width(dstmask, width, bytes);
+}
+
+static void vcap_copy_to_client_keyfield(struct vcap_rule_internal *ri,
+ struct vcap_client_keyfield *field,
+ u8 *value, u8 *mask, u16 width)
+{
+ int field_size = keyfield_size_table[field->ctrl.type] / 2;
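+ /* The table entry covers both value and mask, so half of it is the value/mask size */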
+
+ if (ri->admin->w32be) {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_key_bit_field(&field->data.u1, value, mask);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_keyfield((u8 *)&field->data.u32.value,
+ (u8 *)&field->data.u32.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_from_w32be(field->data.u48.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u48.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_from_w32be(field->data.u56.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u56.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_from_w32be(field->data.u64.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u64.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_from_w32be(field->data.u72.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u72.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_from_w32be(field->data.u112.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u112.mask, mask,
+ field_size, width);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_from_w32be(field->data.u128.value, value,
+ field_size, width);
+ vcap_copy_from_w32be(field->data.u128.mask, mask,
+ field_size, width);
+ break;
+ }
+ } else {
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ vcap_copy_key_bit_field(&field->data.u1, value, mask);
+ break;
+ case VCAP_FIELD_U32:
+ vcap_copy_limited_keyfield((u8 *)&field->data.u32.value,
+ (u8 *)&field->data.u32.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_limited_keyfield(field->data.u48.value,
+ field->data.u48.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_limited_keyfield(field->data.u56.value,
+ field->data.u56.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_limited_keyfield(field->data.u64.value,
+ field->data.u64.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_limited_keyfield(field->data.u72.value,
+ field->data.u72.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_limited_keyfield(field->data.u112.value,
+ field->data.u112.mask,
+ value, mask,
+ width, field_size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_limited_keyfield(field->data.u128.value,
+ field->data.u128.mask,
+ value, mask,
+ width, field_size);
+ break;
+ }
+ }
+}
+
+static void vcap_rule_alloc_keyfield(struct vcap_rule_internal *ri,
+ const struct vcap_field *keyfield,
+ enum vcap_key_field key,
+ u8 *value, u8 *mask)
+{
+ struct vcap_client_keyfield *field;
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return;
+ INIT_LIST_HEAD(&field->ctrl.list);
+ field->ctrl.key = key;
+ field->ctrl.type = keyfield->type;
+ vcap_copy_to_client_keyfield(ri, field, value, mask, keyfield->width);
+ list_add_tail(&field->ctrl.list, &ri->data.keyfields);
+}
+
+/* Verify that the actionset is valid for this VCAP type and that field and
+ * typegroup information is available
+ */
+static bool
+vcap_verify_actionstream_actionset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *actionstream,
+ enum vcap_actionfield_set actionset)
+{
+ const struct vcap_typegroup *tgt;
+ const struct vcap_field *fields;
+ const struct vcap_set *info;
+
+ if (vcap_actionfield_count(vctrl, vt, actionset) == 0)
+ return false;
+
+ info = vcap_actionfieldset(vctrl, vt, actionset);
+ /* Check that the actionset is valid */
+ if (!info)
+ return false;
+
+ /* a type_id of value -1 means that there is no type field */
+ if (info->type_id == (u8)-1)
+ return true;
+
+ /* Get a valid typegroup for the specific actionset */
+ tgt = vcap_actionfield_typegroup(vctrl, vt, actionset);
+ if (!tgt)
+ return false;
+
+ fields = vcap_actionfields(vctrl, vt, actionset);
+ if (!fields)
+ return false;
+
+ /* Later this will be expanded with a check of the type id */
+ return true;
+}
+
+/* Find the subword width of the action typegroup that matches the stream data
+ */
+static int vcap_find_actionstream_typegroup_sw(struct vcap_control *vctrl,
+ enum vcap_type vt, u32 *stream,
+ int sw_max)
+{
+ const struct vcap_typegroup **tgt;
+ int sw_idx, res;
+
+ tgt = vctrl->vcaps[vt].actionfield_set_typegroups;
+ /* Try the longest subword match first */
+ for (sw_idx = vctrl->vcaps[vt].sw_count; sw_idx >= 0; sw_idx--) {
+ if (!tgt[sw_idx])
+ continue;
+ res = vcap_verify_typegroups(stream, vctrl->vcaps[vt].act_width,
+ tgt[sw_idx], false, sw_max);
+ if (res == 0)
+ return sw_idx;
+ }
+ return -EINVAL;
+}
+
+/* Verify that the typegroup information, subword count, actionset and type id
+ * are in sync and correct, return the actionset
+ */
+static enum vcap_actionfield_set
+vcap_find_actionstream_actionset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *stream,
+ int sw_max)
+{
+ const struct vcap_set *actionfield_set;
+ int sw_count, idx;
+ bool res;
+
+ sw_count = vcap_find_actionstream_typegroup_sw(vctrl, vt, stream,
+ sw_max);
+ if (sw_count < 0)
+ return sw_count;
+
+ actionfield_set = vctrl->vcaps[vt].actionfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].actionfield_set_size; ++idx) {
+ if (actionfield_set[idx].sw_per_item != sw_count)
+ continue;
+
+ res = vcap_verify_actionstream_actionset(vctrl, vt,
+ stream, idx);
+ if (res)
+ return idx;
+ }
+ return -EINVAL;
+}
+
+/* Store action value in an element in a list for the client */
+static void vcap_rule_alloc_actionfield(struct vcap_rule_internal *ri,
+ const struct vcap_field *actionfield,
+ enum vcap_action_field action,
+ u8 *value)
+{
+ struct vcap_client_actionfield *field;
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return;
+ INIT_LIST_HEAD(&field->ctrl.list);
+ field->ctrl.action = action;
+ field->ctrl.type = actionfield->type;
+ vcap_copy_to_client_actionfield(ri, field, value, actionfield->width);
+ list_add_tail(&field->ctrl.list, &ri->data.actionfields);
+}
+
+static int vcap_decode_actionset(struct vcap_rule_internal *ri)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ const struct vcap_field *actionfield;
+ enum vcap_actionfield_set actionset;
+ enum vcap_type vt = admin->vtype;
+ const struct vcap_typegroup *tgt;
+ struct vcap_stream_iter iter;
+ int idx, res, actfield_count;
+ u32 *actstream;
+ u8 value[16];
+
+ actstream = admin->cache.actionstream;
+ res = vcap_find_actionstream_actionset(vctrl, vt, actstream, 0);
+ if (res < 0) {
+ pr_err("%s:%d: could not find valid actionset: %d\n",
+ __func__, __LINE__, res);
+ return -EINVAL;
+ }
+ actionset = res;
+ actfield_count = vcap_actionfield_count(vctrl, vt, actionset);
+ actionfield = vcap_actionfields(vctrl, vt, actionset);
+ tgt = vcap_actionfield_typegroup(vctrl, vt, actionset);
+ /* Start decoding the stream */
+ for (idx = 0; idx < actfield_count; ++idx) {
+ if (actionfield[idx].width <= 0)
+ continue;
+ /* Get the action */
+ memset(value, 0, DIV_ROUND_UP(actionfield[idx].width, 8));
+ vcap_iter_init(&iter, vctrl->vcaps[vt].act_width, tgt,
+ actionfield[idx].offset);
+ vcap_decode_field(actstream, &iter, actionfield[idx].width,
+ value);
+ /* Skip if no bits are set */
+ if (vcap_bitarray_zero(actionfield[idx].width, value))
+ continue;
+ vcap_rule_alloc_actionfield(ri, &actionfield[idx], idx, value);
+ /* Later the action id will also be checked */
+ }
+ return vcap_set_rule_set_actionset((struct vcap_rule *)ri, actionset);
+}
+
+static int vcap_decode_keyset(struct vcap_rule_internal *ri)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_stream_iter kiter, miter;
+ struct vcap_admin *admin = ri->admin;
+ enum vcap_keyfield_set keysets[10];
+ const struct vcap_field *keyfield;
+ enum vcap_type vt = admin->vtype;
+ const struct vcap_typegroup *tgt;
+ struct vcap_keyset_list matches;
+ enum vcap_keyfield_set keyset;
+ int idx, res, keyfield_count;
+ u32 *maskstream;
+ u32 *keystream;
+ u8 value[16];
+ u8 mask[16];
+
+ keystream = admin->cache.keystream;
+ maskstream = admin->cache.maskstream;
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
+ res = vcap_find_keystream_keysets(vctrl, vt, keystream, maskstream,
+ false, 0, &matches);
+ if (res < 0) {
+ pr_err("%s:%d: could not find valid keysets: %d\n",
+ __func__, __LINE__, res);
+ return -EINVAL;
+ }
+ keyset = matches.keysets[0];
+ keyfield_count = vcap_keyfield_count(vctrl, vt, keyset);
+ keyfield = vcap_keyfields(vctrl, vt, keyset);
+ tgt = vcap_keyfield_typegroup(vctrl, vt, keyset);
+ /* Start decoding the streams */
+ for (idx = 0; idx < keyfield_count; ++idx) {
+ if (keyfield[idx].width <= 0)
+ continue;
+ /* First get the mask */
+ memset(mask, 0, DIV_ROUND_UP(keyfield[idx].width, 8));
+ vcap_iter_init(&miter, vctrl->vcaps[vt].sw_width, tgt,
+ keyfield[idx].offset);
+ vcap_decode_field(maskstream, &miter, keyfield[idx].width,
+ mask);
+ /* Skip if no mask bits are set */
+ if (vcap_bitarray_zero(keyfield[idx].width, mask))
+ continue;
+ /* Get the key */
+ memset(value, 0, DIV_ROUND_UP(keyfield[idx].width, 8));
+ vcap_iter_init(&kiter, vctrl->vcaps[vt].sw_width, tgt,
+ keyfield[idx].offset);
+ vcap_decode_field(keystream, &kiter, keyfield[idx].width,
+ value);
+ vcap_rule_alloc_keyfield(ri, &keyfield[idx], idx, value, mask);
+ }
+ return vcap_set_rule_set_keyset((struct vcap_rule *)ri, keyset);
+}
+
+/* Read VCAP content into the VCAP cache */
+static int vcap_read_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_admin *admin = ri->admin;
+ int sw_idx, ent_idx = 0, act_idx = 0;
+ u32 addr = ri->addr;
+
+ if (!ri->size || !ri->keyset_sw_regs || !ri->actionset_sw_regs) {
+ pr_err("%s:%d: rule is empty\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ vcap_erase_cache(ri);
+ /* Use the values in the streams to read the VCAP cache */
+ for (sw_idx = 0; sw_idx < ri->size; sw_idx++, addr++) {
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_READ,
+ VCAP_SEL_ALL, addr);
+ ri->vctrl->ops->cache_read(ri->ndev, admin,
+ VCAP_SEL_ENTRY, ent_idx,
+ ri->keyset_sw_regs);
+ ri->vctrl->ops->cache_read(ri->ndev, admin,
+ VCAP_SEL_ACTION, act_idx,
+ ri->actionset_sw_regs);
+ if (sw_idx == 0)
+ ri->vctrl->ops->cache_read(ri->ndev, admin,
+ VCAP_SEL_COUNTER,
+ ri->counter_id, 0);
+ ent_idx += ri->keyset_sw_regs;
+ act_idx += ri->actionset_sw_regs;
+ }
+ return 0;
+}
+
+/* Write VCAP cache content to the VCAP HW instance */
+static int vcap_write_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_admin *admin = ri->admin;
+ int sw_idx, ent_idx = 0, act_idx = 0;
+ u32 addr = ri->addr;
+
+ if (!ri->size || !ri->keyset_sw_regs || !ri->actionset_sw_regs) {
+ pr_err("%s:%d: rule is empty\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ /* Use the values in the streams to write the VCAP cache */
+ for (sw_idx = 0; sw_idx < ri->size; sw_idx++, addr++) {
+ ri->vctrl->ops->cache_write(ri->ndev, admin,
+ VCAP_SEL_ENTRY, ent_idx,
+ ri->keyset_sw_regs);
+ ri->vctrl->ops->cache_write(ri->ndev, admin,
+ VCAP_SEL_ACTION, act_idx,
+ ri->actionset_sw_regs);
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_WRITE,
+ VCAP_SEL_ALL, addr);
+ ent_idx += ri->keyset_sw_regs;
+ act_idx += ri->actionset_sw_regs;
+ }
+ return 0;
+}
+
+static int vcap_write_counter(struct vcap_rule_internal *ri,
+ struct vcap_counter *ctr)
+{
+ struct vcap_admin *admin = ri->admin;
+
+ admin->cache.counter = ctr->value;
+ admin->cache.sticky = ctr->sticky;
+ ri->vctrl->ops->cache_write(ri->ndev, admin, VCAP_SEL_COUNTER,
+ ri->counter_id, 0);
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_WRITE,
+ VCAP_SEL_COUNTER, ri->addr);
+ return 0;
+}
+
+/* Convert a chain id to a VCAP lookup index */
+int vcap_chain_id_to_lookup(struct vcap_admin *admin, int cur_cid)
+{
+ int lookup_first = admin->vinst * admin->lookups_per_instance;
+ int lookup_last = lookup_first + admin->lookups_per_instance;
+ int cid_next = admin->first_cid + VCAP_CID_LOOKUP_SIZE;
+ int cid = admin->first_cid;
+ int lookup;
+
+ for (lookup = lookup_first; lookup < lookup_last; ++lookup,
+ cid += VCAP_CID_LOOKUP_SIZE, cid_next += VCAP_CID_LOOKUP_SIZE)
+ if (cur_cid >= cid && cur_cid < cid_next)
+ return lookup;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_chain_id_to_lookup);
+
+/* Lookup a vcap instance using chain id */
+struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid)
+{
+ struct vcap_admin *admin;
+
+ if (vcap_api_check(vctrl))
+ return NULL;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (cid >= admin->first_cid && cid <= admin->last_cid)
+ return admin;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vcap_find_admin);
+
+/* Is the next chain id in the following lookup, possibly in another VCAP */
+bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid)
+{
+ struct vcap_admin *admin, *next_admin;
+ int lookup, next_lookup;
+
+ /* The offset must be at least one lookup */
+ if (next_cid < cur_cid + VCAP_CID_LOOKUP_SIZE)
+ return false;
+
+ if (vcap_api_check(vctrl))
+ return false;
+
+ admin = vcap_find_admin(vctrl, cur_cid);
+ if (!admin)
+ return false;
+
+ /* If no VCAP contains the next chain, the next chain must be beyond
+ * the last chain in the current VCAP
+ */
+ next_admin = vcap_find_admin(vctrl, next_cid);
+ if (!next_admin)
+ return next_cid > admin->last_cid;
+
+ lookup = vcap_chain_id_to_lookup(admin, cur_cid);
+ next_lookup = vcap_chain_id_to_lookup(next_admin, next_cid);
+
+ /* Next lookup must be the following lookup */
+ if (admin == next_admin || admin->vtype == next_admin->vtype)
+ return next_lookup == lookup + 1;
+
+ /* Must be the first lookup in the next VCAP instance */
+ return next_lookup == 0;
+}
+EXPORT_SYMBOL_GPL(vcap_is_next_lookup);
+
+/* Check if there is room for a new rule */
+static int vcap_rule_space(struct vcap_admin *admin, int size)
+{
+ if (admin->last_used_addr - size < admin->first_valid_addr) {
+ pr_err("%s:%d: No room for rule size: %u, %u\n",
+ __func__, __LINE__, size, admin->first_valid_addr);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+/* Add the keyset typefield to the list of rule keyfields */
+static int vcap_add_type_keyfield(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_keyfield_set keyset = rule->keyset;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+ const struct vcap_set *kset;
+ int ret = -EINVAL;
+
+ kset = vcap_keyfieldset(ri->vctrl, vt, keyset);
+ if (!kset)
+ return ret;
+ if (kset->type_id == (u8)-1) /* No type field is needed */
+ return 0;
+
+ fields = vcap_keyfields(ri->vctrl, vt, keyset);
+ if (!fields)
+ return -EINVAL;
+ if (fields[VCAP_KF_TYPE].width > 1) {
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE,
+ kset->type_id, 0xff);
+ } else {
+ if (kset->type_id)
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_TYPE,
+ VCAP_BIT_1);
+ else
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_TYPE,
+ VCAP_BIT_0);
+ }
+ return ret;
+}
+
+/* Add a keyset to a keyset list */
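+/* Returns false when the list is full, allowing the caller to stop collecting matches */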
+bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
+ enum vcap_keyfield_set keyset)
+{
+ int idx;
+
+ if (keysetlist->cnt < keysetlist->max) {
+ /* Avoid duplicates */
+ for (idx = 0; idx < keysetlist->cnt; ++idx)
+ if (keysetlist->keysets[idx] == keyset)
+ return keysetlist->cnt < keysetlist->max;
+ keysetlist->keysets[keysetlist->cnt++] = keyset;
+ }
+ return keysetlist->cnt < keysetlist->max;
+}
+EXPORT_SYMBOL_GPL(vcap_keyset_list_add);
+
+/* map keyset id to a string with the keyset name */
+const char *vcap_keyset_name(struct vcap_control *vctrl,
+ enum vcap_keyfield_set keyset)
+{
+ return vctrl->stats->keyfield_set_names[keyset];
+}
+EXPORT_SYMBOL_GPL(vcap_keyset_name);
+
+/* map key field id to a string with the key name */
+const char *vcap_keyfield_name(struct vcap_control *vctrl,
+ enum vcap_key_field key)
+{
+ return vctrl->stats->keyfield_names[key];
+}
+EXPORT_SYMBOL_GPL(vcap_keyfield_name);
+
+/* map actionset id to a string with the actionset name */
+const char *vcap_actionset_name(struct vcap_control *vctrl,
+ enum vcap_actionfield_set actionset)
+{
+ return vctrl->stats->actionfield_set_names[actionset];
+}
+
+/* map action field id to a string with the action name */
+const char *vcap_actionfield_name(struct vcap_control *vctrl,
+ enum vcap_action_field action)
+{
+ return vctrl->stats->actionfield_names[action];
+}
+
+/* Return the keyfield that matches a key in a keyset */
+static const struct vcap_field *
+vcap_find_keyset_keyfield(struct vcap_control *vctrl,
+ enum vcap_type vtype,
+ enum vcap_keyfield_set keyset,
+ enum vcap_key_field key)
+{
+ const struct vcap_field *fields;
+ int idx, count;
+
+ fields = vcap_keyfields(vctrl, vtype, keyset);
+ if (!fields)
+ return NULL;
+
+ /* Iterate the keyfields of the keyset */
+ count = vcap_keyfield_count(vctrl, vtype, keyset);
+ for (idx = 0; idx < count; ++idx) {
+ if (fields[idx].width == 0)
+ continue;
+
+ if (key == idx)
+ return &fields[idx];
+ }
+
+ return NULL;
+}
+
+/* Match a list of keys against the keysets available in a vcap type */
+static bool _vcap_rule_find_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ const struct vcap_client_keyfield *ckf;
+ int keyset, found, keycount, map_size;
+ const struct vcap_field **map;
+ enum vcap_type vtype;
+
+ vtype = ri->admin->vtype;
+ map = ri->vctrl->vcaps[vtype].keyfield_set_map;
+ map_size = ri->vctrl->vcaps[vtype].keyfield_set_size;
+
+ /* Get a count of the keyfields we want to match */
+ keycount = 0;
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ ++keycount;
+
+ matches->cnt = 0;
+ /* Iterate the keysets of the VCAP */
+ for (keyset = 0; keyset < map_size; ++keyset) {
+ if (!map[keyset])
+ continue;
+
+ /* Iterate the keys in the rule */
+ found = 0;
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ if (vcap_find_keyset_keyfield(ri->vctrl, vtype,
+ keyset, ckf->ctrl.key))
+ ++found;
+
+ /* Save the keyset if all keyfields were found */
+ if (found == keycount)
+ if (!vcap_keyset_list_add(matches, keyset))
+ /* bail out when the quota is filled */
+ break;
+ }
+
+ return matches->cnt > 0;
+}
+
+/* Match a list of keys against the keysets available in a vcap type */
+bool vcap_rule_find_keysets(struct vcap_rule *rule,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+
+ return _vcap_rule_find_keysets(ri, matches);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_find_keysets);
+
+/* Validate a rule with respect to available port keys */
+int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_keyset_list matches = {};
+ enum vcap_keyfield_set keysets[10];
+ int ret;
+
+ ret = vcap_api_check(ri->vctrl);
+ if (ret)
+ return ret;
+ if (!ri->admin) {
+ ri->data.exterr = VCAP_ERR_NO_ADMIN;
+ return -EINVAL;
+ }
+ if (!ri->ndev) {
+ ri->data.exterr = VCAP_ERR_NO_NETDEV;
+ return -EINVAL;
+ }
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+ if (ri->data.keyset == VCAP_KFS_NO_VALUE) {
+ /* Iterate over rule keyfields and select keysets that fit */
+ if (!_vcap_rule_find_keysets(ri, &matches)) {
+ ri->data.exterr = VCAP_ERR_NO_KEYSET_MATCH;
+ return -EINVAL;
+ }
+ } else {
+ /* prepare for keyset validation */
+ keysets[0] = ri->data.keyset;
+ matches.cnt = 1;
+ }
+
+ /* Pick a keyset that is supported in the port lookups */
+ ret = ri->vctrl->ops->validate_keyset(ri->ndev, ri->admin, rule,
+ &matches, l3_proto);
+ if (ret < 0) {
+ pr_err("%s:%d: keyset validation failed: %d\n",
+ __func__, __LINE__, ret);
+ ri->data.exterr = VCAP_ERR_NO_PORT_KEYSET_MATCH;
+ return ret;
+ }
+ /* use the keyset that is supported in the port lookups */
+ ret = vcap_set_rule_set_keyset(rule, ret);
+ if (ret < 0) {
+ pr_err("%s:%d: keyset was not updated: %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ if (ri->data.actionset == VCAP_AFS_NO_VALUE) {
+ /* Later also actionsets will be matched against actions in
+ * the rule, and the type will be set accordingly
+ */
+ ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH;
+ return -EINVAL;
+ }
+ vcap_add_type_keyfield(rule);
+ /* Add default fields to this rule */
+ ri->vctrl->ops->add_default_fields(ri->ndev, ri->admin, rule);
+
+ /* Rule size is the maximum of the entry and action subword count */
+ ri->size = max(ri->keyset_sw, ri->actionset_sw);
+
+ /* Finally check if there is room for the rule in the VCAP */
+ return vcap_rule_space(ri->admin, ri->size);
+}
+EXPORT_SYMBOL_GPL(vcap_val_rule);
+
+/* Entries are sorted with increasing values of sort_key, i.e. the entry with
+ * the lowest numerical sort_key is first in the list. To place the largest
+ * rules first in the list the rule size is inverted with (max_size - size).
+ */
+static u32 vcap_sort_key(u32 max_size, u32 size, u8 user, u16 prio)
+{
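+ /* bit 31..24: inverted size, bit 23..16: user, bit 15..0: priority */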
+ return ((max_size - size) << 24) | (user << 16) | prio;
+}
+
+/* calculate the address of the next rule after this (lower address and prio) */
+static u32 vcap_next_rule_addr(u32 addr, struct vcap_rule_internal *ri)
+{
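+ /* e.g. addr 20 with a rule size of 6 subwords gives ((20 - 6) / 6) * 6 = 12 */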
+ return ((addr - ri->size) / ri->size) * ri->size;
+}
+
+/* Assign a unique rule id and autogenerate one if id == 0 */
+static u32 vcap_set_rule_id(struct vcap_rule_internal *ri)
+{
+ if (ri->data.id != 0)
+ return ri->data.id;
+
+ for (u32 next_id = 1; next_id < ~0; ++next_id) {
+ if (!vcap_lookup_rule(ri->vctrl, next_id)) {
+ ri->data.id = next_id;
+ break;
+ }
+ }
+ return ri->data.id;
+}
+
+static int vcap_insert_rule(struct vcap_rule_internal *ri,
+ struct vcap_rule_move *move)
+{
+ int sw_count = ri->vctrl->vcaps[ri->admin->vtype].sw_count;
+ struct vcap_rule_internal *duprule, *iter, *elem = NULL;
+ struct vcap_admin *admin = ri->admin;
+ u32 addr;
+
+ ri->sort_key = vcap_sort_key(sw_count, ri->size, ri->data.user,
+ ri->data.priority);
+
+ /* Insert the new rule in the list of rules based on the sort key
+ * If the rule needs to be inserted between existing rules then move
+ * these rules to make room for the new rule and update their start
+ * address.
+ */
+ list_for_each_entry(iter, &admin->rules, list) {
+ if (ri->sort_key < iter->sort_key) {
+ elem = iter;
+ break;
+ }
+ }
+
+ if (!elem) {
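+ /* No rule with a larger sort key exists: place the new rule below the last used address */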
+ ri->addr = vcap_next_rule_addr(admin->last_used_addr, ri);
+ admin->last_used_addr = ri->addr;
+
+ /* Add a shallow copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri);
+ if (IS_ERR(duprule))
+ return PTR_ERR(duprule);
+
+ list_add_tail(&duprule->list, &admin->rules);
+ return 0;
+ }
+
+ /* Reuse the space of the current rule */
+ addr = elem->addr + elem->size;
+ ri->addr = vcap_next_rule_addr(addr, ri);
+ addr = ri->addr;
+
+ /* Add a shallow copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri);
+ if (IS_ERR(duprule))
+ return PTR_ERR(duprule);
+
+ /* Add before the current entry */
+ list_add_tail(&duprule->list, &elem->list);
+
+ /* Update the current rule */
+ elem->addr = vcap_next_rule_addr(addr, elem);
+ addr = elem->addr;
+
+ /* Update the address in the remaining rules in the list */
+ list_for_each_entry_continue(elem, &admin->rules, list) {
+ elem->addr = vcap_next_rule_addr(addr, elem);
+ addr = elem->addr;
+ }
+
+ /* Update the move info */
+ move->addr = admin->last_used_addr;
+ move->count = ri->addr - addr;
+ move->offset = admin->last_used_addr - addr;
+ admin->last_used_addr = addr;
+ return 0;
+}
+
+static void vcap_move_rules(struct vcap_rule_internal *ri,
+ struct vcap_rule_move *move)
+{
+ ri->vctrl->ops->move(ri->ndev, ri->admin, move->addr,
+ move->offset, move->count);
+}
+
+/* Encode and write a validated rule to the VCAP */
+int vcap_add_rule(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_rule_move move = {0};
+ int ret;
+
+ ret = vcap_api_check(ri->vctrl);
+ if (ret)
+ return ret;
+ /* Insert the new rule in the list of vcap rules */
+ mutex_lock(&ri->admin->lock);
+ ret = vcap_insert_rule(ri, &move);
+ if (ret < 0) {
+ pr_err("%s:%d: could not insert rule in vcap list: %d\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+ if (move.count > 0)
+ vcap_move_rules(ri, &move);
+ ret = vcap_encode_rule(ri);
+ if (ret) {
+ pr_err("%s:%d: rule encoding error: %d\n", __func__, __LINE__, ret);
+ goto out;
+ }
+
+ ret = vcap_write_rule(ri);
+ if (ret)
+ pr_err("%s:%d: rule write error: %d\n", __func__, __LINE__, ret);
+out:
+ mutex_unlock(&ri->admin->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vcap_add_rule);
+
+/* Allocate a new rule with the provided arguments */
+struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
+ struct net_device *ndev, int vcap_chain_id,
+ enum vcap_user user, u16 priority,
+ u32 id)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err, maxsize;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return ERR_PTR(err);
+ if (!ndev)
+ return ERR_PTR(-ENODEV);
+ /* Get the VCAP instance */
+ admin = vcap_find_admin(vctrl, vcap_chain_id);
+ if (!admin)
+ return ERR_PTR(-ENOENT);
+ /* Sanity check that this VCAP is supported on this platform */
+ if (vctrl->vcaps[admin->vtype].rows == 0)
+ return ERR_PTR(-EINVAL);
+ /* Check if a rule with this id already exists */
+ if (vcap_lookup_rule(vctrl, id))
+ return ERR_PTR(-EEXIST);
+ /* Check if there is room for the rule in the block(s) of the VCAP */
+ maxsize = vctrl->vcaps[admin->vtype].sw_count; /* worst case rule size */
+ if (vcap_rule_space(admin, maxsize))
+ return ERR_PTR(-ENOSPC);
+ /* Create a container for the rule and return it */
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
+ if (!ri)
+ return ERR_PTR(-ENOMEM);
+ ri->data.vcap_chain_id = vcap_chain_id;
+ ri->data.user = user;
+ ri->data.priority = priority;
+ ri->data.id = id;
+ ri->data.keyset = VCAP_KFS_NO_VALUE;
+ ri->data.actionset = VCAP_AFS_NO_VALUE;
+ INIT_LIST_HEAD(&ri->list);
+ INIT_LIST_HEAD(&ri->data.keyfields);
+ INIT_LIST_HEAD(&ri->data.actionfields);
+ ri->ndev = ndev;
+ ri->admin = admin; /* refer to the vcap instance */
+ ri->vctrl = vctrl; /* refer to the client */
+ if (vcap_set_rule_id(ri) == 0)
+ goto out_free;
+ vcap_erase_cache(ri);
+ return (struct vcap_rule *)ri;
+
+out_free:
+ kfree(ri);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(vcap_alloc_rule);
+
+/* Free the memory of a rule owned by the client after the rule has been added to the VCAP */
+void vcap_free_rule(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_actionfield *caf, *next_caf;
+ struct vcap_client_keyfield *ckf, *next_ckf;
+
+ /* Deallocate the list of keys and actions */
+ list_for_each_entry_safe(ckf, next_ckf, &ri->data.keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+ list_for_each_entry_safe(caf, next_caf, &ri->data.actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
+ /* Deallocate the rule */
+ kfree(rule);
+}
+EXPORT_SYMBOL_GPL(vcap_free_rule);
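+
+/* A rough sketch of how a client typically uses the rule API above; the
+ * chain id, user, priority and field values below are illustrative only:
+ *
+ *	rule = vcap_alloc_rule(vctrl, ndev, chain_id, VCAP_USER_TC, 10, 0);
+ *	if (IS_ERR(rule))
+ *		return PTR_ERR(rule);
+ *	vcap_rule_add_key_u32(rule, VCAP_KF_L3_IP_PROTO, 17, 0xff);
+ *	err = vcap_val_rule(rule, ETH_P_ALL);
+ *	if (!err)
+ *		err = vcap_add_rule(rule);
+ *	vcap_free_rule(rule);
+ */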
+
+struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *elem;
+ struct vcap_rule_internal *ri;
+ int err;
+
+ ri = NULL;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return ERR_PTR(err);
+ elem = vcap_lookup_rule(vctrl, id);
+ if (!elem)
+ return NULL;
+ mutex_lock(&elem->admin->lock);
+ ri = vcap_dup_rule(elem);
+ if (IS_ERR(ri))
+ goto unlock;
+ err = vcap_read_rule(ri);
+ if (err) {
+ ri = ERR_PTR(err);
+ goto unlock;
+ }
+ err = vcap_decode_keyset(ri);
+ if (err) {
+ ri = ERR_PTR(err);
+ goto unlock;
+ }
+ err = vcap_decode_actionset(ri);
+ if (err) {
+ ri = ERR_PTR(err);
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&elem->admin->lock);
+ return (struct vcap_rule *)ri;
+}
+EXPORT_SYMBOL_GPL(vcap_get_rule);
+
+/* Update existing rule */
+int vcap_mod_rule(struct vcap_rule *rule)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_counter ctr;
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return err;
+
+ if (!vcap_lookup_rule(ri->vctrl, ri->data.id))
+ return -ENOENT;
+
+ mutex_lock(&ri->admin->lock);
+ /* Encode the bitstreams to the VCAP cache */
+ vcap_erase_cache(ri);
+ err = vcap_encode_rule(ri);
+ if (err)
+ goto out;
+
+ err = vcap_write_rule(ri);
+ if (err)
+ goto out;
+
+ memset(&ctr, 0, sizeof(ctr));
+ err = vcap_write_counter(ri, &ctr);
+ if (err)
+ goto out;
+
+out:
+ mutex_unlock(&ri->admin->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_mod_rule);
+
+/* Return the alignment offset for a new rule address */
+static int vcap_valid_rule_move(struct vcap_rule_internal *el, int offset)
+{
+ return (el->addr + offset) % el->size;
+}
+
+/* Update the rule address with an offset */
+static void vcap_adjust_rule_addr(struct vcap_rule_internal *el, int offset)
+{
+ el->addr += offset;
+}
+
+/* Rules need to be moved to fill the gap left by the deleted rule */
+static int vcap_fill_rule_gap(struct vcap_rule_internal *ri)
+{
+ struct vcap_admin *admin = ri->admin;
+ struct vcap_rule_internal *elem;
+ struct vcap_rule_move move;
+ int gap = 0, offset = 0;
+
+ /* If the first rule is deleted: Move other rules to the top */
+ if (list_is_first(&ri->list, &admin->rules))
+ offset = admin->last_valid_addr + 1 - ri->addr - ri->size;
+
+ /* Locate gaps between odd size rules and adjust the move */
+ elem = ri;
+ list_for_each_entry_continue(elem, &admin->rules, list)
+ gap += vcap_valid_rule_move(elem, ri->size);
+
+ /* Update the address in the remaining rules in the list */
+ elem = ri;
+ list_for_each_entry_continue(elem, &admin->rules, list)
+ vcap_adjust_rule_addr(elem, ri->size + gap + offset);
+
+ /* Update the move info */
+ move.addr = admin->last_used_addr;
+ move.count = ri->addr - admin->last_used_addr - gap;
+ move.offset = -(ri->size + gap + offset);
+
+ /* Do the actual move operation */
+ vcap_move_rules(ri, &move);
+
+ return gap + offset;
+}
+
+/* Delete rule in a VCAP instance */
+int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id)
+{
+ struct vcap_rule_internal *ri, *elem;
+ struct vcap_admin *admin;
+ int gap = 0, err;
+
+ /* This will later also handle rule moving */
+ if (!ndev)
+ return -ENODEV;
+ err = vcap_api_check(vctrl);
+ if (err)
+ return err;
+ /* Look for the rule id in all vcaps */
+ ri = vcap_lookup_rule(vctrl, id);
+ if (!ri)
+ return -EINVAL;
+ admin = ri->admin;
+
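+ /* Rules stored below the deleted rule must be moved up to fill the gap */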
+ if (ri->addr > admin->last_used_addr)
+ gap = vcap_fill_rule_gap(ri);
+
+ /* Delete the rule from the list of rules and the cache */
+ mutex_lock(&admin->lock);
+ list_del(&ri->list);
+ vctrl->ops->init(ndev, admin, admin->last_used_addr, ri->size + gap);
+ kfree(ri);
+ mutex_unlock(&admin->lock);
+
+ /* Update the last used address, set to default when no rules */
+ if (list_empty(&admin->rules)) {
+ admin->last_used_addr = admin->last_valid_addr + 1;
+ } else {
+ elem = list_last_entry(&admin->rules, struct vcap_rule_internal,
+ list);
+ admin->last_used_addr = elem->addr;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_del_rule);
+
+/* Delete all rules in the VCAP instance */
+int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin)
+{
+ struct vcap_enabled_port *eport, *next_eport;
+ struct vcap_rule_internal *ri, *next_ri;
+ int ret = vcap_api_check(vctrl);
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&admin->lock);
+ list_for_each_entry_safe(ri, next_ri, &admin->rules, list) {
+ vctrl->ops->init(ri->ndev, admin, ri->addr, ri->size);
+ list_del(&ri->list);
+ kfree(ri);
+ }
+ admin->last_used_addr = admin->last_valid_addr;
+
+ /* Remove list of enabled ports */
+ list_for_each_entry_safe(eport, next_eport, &admin->enabled, list) {
+ list_del(&eport->list);
+ kfree(eport);
+ }
+ mutex_unlock(&admin->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_del_rules);
+
+/* Find a client key field in a rule */
+static struct vcap_client_keyfield *
+vcap_find_keyfield(struct vcap_rule *rule, enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *ckf;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ if (ckf->ctrl.key == key)
+ return ckf;
+ return NULL;
+}
+
+/* Find information on a key field in a rule */
+const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
+ enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_keyfield_set keyset = rule->keyset;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+
+ if (keyset == VCAP_KFS_NO_VALUE)
+ return NULL;
+ fields = vcap_keyfields(ri->vctrl, vt, keyset);
+ if (!fields)
+ return NULL;
+ return &fields[key];
+}
+EXPORT_SYMBOL_GPL(vcap_lookup_keyfield);
+
+/* Copy data from src to dst but reverse the data in chunks of 32bits.
+ * For example if src is 00:11:22:33:44:55 where 55 is LSB the dst will
+ * have the value 22:33:44:55:00:11.
+ */
+static void vcap_copy_to_w32be(u8 *dst, u8 *src, int size)
+{
+ for (int idx = 0; idx < size; ++idx) {
+ int first_byte_index = 0;
+ int nidx;
+
+ first_byte_index = size - (((idx >> 2) + 1) << 2);
+ if (first_byte_index < 0)
+ first_byte_index = 0;
+ nidx = idx + first_byte_index - (idx & ~0x3);
+ dst[nidx] = src[idx];
+ }
+}
+
+static void vcap_copy_from_client_keyfield(struct vcap_rule *rule,
+ struct vcap_client_keyfield *field,
+ struct vcap_client_keyfield_data *data)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ int size;
+
+ if (!ri->admin->w32be) {
+ memcpy(&field->data, data, sizeof(field->data));
+ return;
+ }
+
+ size = keyfield_size_table[field->ctrl.type] / 2;
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(&field->data, data, sizeof(field->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size);
+ vcap_copy_to_w32be(field->data.u48.mask, data->u48.mask, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size);
+ vcap_copy_to_w32be(field->data.u56.mask, data->u56.mask, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size);
+ vcap_copy_to_w32be(field->data.u64.mask, data->u64.mask, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size);
+ vcap_copy_to_w32be(field->data.u72.mask, data->u72.mask, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size);
+ vcap_copy_to_w32be(field->data.u112.mask, data->u112.mask, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(field->data.u128.value, data->u128.value, size);
+ vcap_copy_to_w32be(field->data.u128.mask, data->u128.mask, size);
+ break;
+ }
+}
+
+/* Check if the keyfield is already in the rule */
+static bool vcap_keyfield_unique(struct vcap_rule *rule,
+ enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_keyfield *ckf;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ if (ckf->ctrl.key == key)
+ return false;
+ return true;
+}
+
+/* Check if the keyfield is in the keyset */
+static bool vcap_keyfield_match_keyset(struct vcap_rule *rule,
+ enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_keyfield_set keyset = rule->keyset;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+
+ /* the field is accepted if the rule has no keyset yet */
+ if (keyset == VCAP_KFS_NO_VALUE)
+ return true;
+ fields = vcap_keyfields(ri->vctrl, vt, keyset);
+ if (!fields)
+ return false;
+ /* if there is a width there is a way */
+ return fields[key].width > 0;
+}
+
+static int vcap_rule_add_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ enum vcap_field_type ftype,
+ struct vcap_client_keyfield_data *data)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *field;
+
+ if (!vcap_keyfield_unique(rule, key)) {
+ pr_warn("%s:%d: keyfield %s is already in the rule\n",
+ __func__, __LINE__,
+ vcap_keyfield_name(ri->vctrl, key));
+ return -EINVAL;
+ }
+
+ if (!vcap_keyfield_match_keyset(rule, key)) {
+ pr_err("%s:%d: keyfield %s does not belong in the rule keyset\n",
+ __func__, __LINE__,
+ vcap_keyfield_name(ri->vctrl, key));
+ return -EINVAL;
+ }
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return -ENOMEM;
+ field->ctrl.key = key;
+ field->ctrl.type = ftype;
+ vcap_copy_from_client_keyfield(rule, field, data);
+ list_add_tail(&field->ctrl.list, &rule->keyfields);
+ return 0;
+}
+
+static void vcap_rule_set_key_bitsize(struct vcap_u1_key *u1, enum vcap_bit val)
+{
+ switch (val) {
+ case VCAP_BIT_0:
+ u1->value = 0;
+ u1->mask = 1;
+ break;
+ case VCAP_BIT_1:
+ u1->value = 1;
+ u1->mask = 1;
+ break;
+ case VCAP_BIT_ANY:
+ u1->value = 0;
+ u1->mask = 0;
+ break;
+ }
+}
+
+/* Add a bit key with value and mask to the rule */
+int vcap_rule_add_key_bit(struct vcap_rule *rule, enum vcap_key_field key,
+ enum vcap_bit val)
+{
+ struct vcap_client_keyfield_data data;
+
+ vcap_rule_set_key_bitsize(&data.u1, val);
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_BIT, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_bit);
+
+/* Add a 32 bit key field with value and mask to the rule */
+int vcap_rule_add_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask)
+{
+ struct vcap_client_keyfield_data data;
+
+ data.u32.value = value;
+ data.u32.mask = mask;
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u32);
+
+/* Add a 48 bit key with value and mask to the rule */
+int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u48_key *fieldval)
+{
+ struct vcap_client_keyfield_data data;
+
+ memcpy(&data.u48, fieldval, sizeof(data.u48));
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U48, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u48);
+
+/* Add a 72 bit key with value and mask to the rule */
+int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u72_key *fieldval)
+{
+ struct vcap_client_keyfield_data data;
+
+ memcpy(&data.u72, fieldval, sizeof(data.u72));
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U72, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u72);
+
+/* Add a 128 bit key with value and mask to the rule */
+int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u128_key *fieldval)
+{
+ struct vcap_client_keyfield_data data;
+
+ memcpy(&data.u128, fieldval, sizeof(data.u128));
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U128, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u128);
+
+int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 *value, u32 *mask)
+{
+ struct vcap_client_keyfield *ckf;
+
+ ckf = vcap_find_keyfield(rule, key);
+ if (!ckf)
+ return -ENOENT;
+
+ *value = ckf->data.u32.value;
+ *mask = ckf->data.u32.mask;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_get_key_u32);
+
+/* Find a client action field in a rule */
+static struct vcap_client_actionfield *
+vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
+{
+ struct vcap_rule_internal *ri = (struct vcap_rule_internal *)rule;
+ struct vcap_client_actionfield *caf;
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
+ if (caf->ctrl.action == act)
+ return caf;
+ return NULL;
+}
+
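+/* Copy actionfield data, converting to w32be layout when required */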
+static void vcap_copy_from_client_actionfield(struct vcap_rule *rule,
+ struct vcap_client_actionfield *field,
+ struct vcap_client_actionfield_data *data)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ int size;
+
+ if (!ri->admin->w32be) {
+ memcpy(&field->data, data, sizeof(field->data));
+ return;
+ }
+
+ size = actionfield_size_table[field->ctrl.type];
+ switch (field->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(&field->data, data, sizeof(field->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(field->data.u128.value, data->u128.value, size);
+ break;
+ }
+}
+
+/* Check if the actionfield is already in the rule */
+static bool vcap_actionfield_unique(struct vcap_rule *rule,
+ enum vcap_action_field act)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_actionfield *caf;
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
+ if (caf->ctrl.action == act)
+ return false;
+ return true;
+}
+
+/* Check if the actionfield is in the actionset */
+static bool vcap_actionfield_match_actionset(struct vcap_rule *rule,
+ enum vcap_action_field action)
+{
+ enum vcap_actionfield_set actionset = rule->actionset;
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+
+ /* the field is accepted if the rule has no actionset yet */
+ if (actionset == VCAP_AFS_NO_VALUE)
+ return true;
+ fields = vcap_actionfields(ri->vctrl, vt, actionset);
+ if (!fields)
+ return false;
+	/* a non-zero width means the actionfield is available in the actionset */
+ return fields[action].width > 0;
+}
+
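+/* Add an action field to the rule if unique and allowed by the actionset */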
+static int vcap_rule_add_action(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ enum vcap_field_type ftype,
+ struct vcap_client_actionfield_data *data)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_actionfield *field;
+
+ if (!vcap_actionfield_unique(rule, action)) {
+ pr_warn("%s:%d: actionfield %s is already in the rule\n",
+ __func__, __LINE__,
+ vcap_actionfield_name(ri->vctrl, action));
+ return -EINVAL;
+ }
+
+ if (!vcap_actionfield_match_actionset(rule, action)) {
+ pr_err("%s:%d: actionfield %s does not belong in the rule actionset\n",
+ __func__, __LINE__,
+ vcap_actionfield_name(ri->vctrl, action));
+ return -EINVAL;
+ }
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return -ENOMEM;
+ field->ctrl.action = action;
+ field->ctrl.type = ftype;
+ vcap_copy_from_client_actionfield(rule, field, data);
+ list_add_tail(&field->ctrl.list, &rule->actionfields);
+ return 0;
+}
+
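+/* Convert a vcap_bit into the value of a 1 bit action field */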
+static void vcap_rule_set_action_bitsize(struct vcap_u1_action *u1,
+ enum vcap_bit val)
+{
+ switch (val) {
+ case VCAP_BIT_0:
+ u1->value = 0;
+ break;
+ case VCAP_BIT_1:
+ u1->value = 1;
+ break;
+ case VCAP_BIT_ANY:
+ u1->value = 0;
+ break;
+ }
+}
+
+/* Add a bit action with value to the rule */
+int vcap_rule_add_action_bit(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ enum vcap_bit val)
+{
+ struct vcap_client_actionfield_data data;
+
+ vcap_rule_set_action_bitsize(&data.u1, val);
+ return vcap_rule_add_action(rule, action, VCAP_FIELD_BIT, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_action_bit);
+
+/* Add a 32 bit action field with value to the rule */
+int vcap_rule_add_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value)
+{
+ struct vcap_client_actionfield_data data;
+
+ data.u32.value = value;
+ return vcap_rule_add_action(rule, action, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_action_u32);
+
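+/* Read the rule counter and sticky bit from the VCAP via the cache */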
+static int vcap_read_counter(struct vcap_rule_internal *ri,
+ struct vcap_counter *ctr)
+{
+ struct vcap_admin *admin = ri->admin;
+
+ ri->vctrl->ops->update(ri->ndev, admin, VCAP_CMD_READ, VCAP_SEL_COUNTER,
+ ri->addr);
+ ri->vctrl->ops->cache_read(ri->ndev, admin, VCAP_SEL_COUNTER,
+ ri->counter_id, 0);
+ ctr->value = admin->cache.counter;
+ ctr->sticky = admin->cache.sticky;
+ return 0;
+}
+
+/* Copy to host byte order */
+void vcap_netbytes_copy(u8 *dst, u8 *src, int count)
+{
+ int idx;
+
+ for (idx = 0; idx < count; ++idx, ++dst)
+ *dst = src[count - idx - 1];
+}
+EXPORT_SYMBOL_GPL(vcap_netbytes_copy);
+
+/* Convert validation error code into tc extack error message */
+void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
+{
+ switch (vrule->exterr) {
+ case VCAP_ERR_NONE:
+ break;
+ case VCAP_ERR_NO_ADMIN:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Missing VCAP instance");
+ break;
+ case VCAP_ERR_NO_NETDEV:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Missing network interface");
+ break;
+ case VCAP_ERR_NO_KEYSET_MATCH:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No keyset matched the filter keys");
+ break;
+ case VCAP_ERR_NO_ACTIONSET_MATCH:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No actionset matched the filter actions");
+ break;
+ case VCAP_ERR_NO_PORT_KEYSET_MATCH:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No port keyset matched the filter keys");
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(vcap_set_tc_exterr);
+
+/* Check if this port is already enabled for this VCAP instance */
+static bool vcap_is_enabled(struct vcap_admin *admin, struct net_device *ndev,
+ unsigned long cookie)
+{
+ struct vcap_enabled_port *eport;
+
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->cookie == cookie || eport->ndev == ndev)
+ return true;
+
+ return false;
+}
+
+/* Enable this port for this VCAP instance */
+static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev,
+ unsigned long cookie)
+{
+ struct vcap_enabled_port *eport;
+
+ eport = kzalloc(sizeof(*eport), GFP_KERNEL);
+ if (!eport)
+ return -ENOMEM;
+
+ eport->ndev = ndev;
+ eport->cookie = cookie;
+ list_add_tail(&eport->list, &admin->enabled);
+
+ return 0;
+}
+
+/* Disable this port for this VCAP instance */
+static int vcap_disable(struct vcap_admin *admin, struct net_device *ndev,
+ unsigned long cookie)
+{
+ struct vcap_enabled_port *eport;
+
+ list_for_each_entry(eport, &admin->enabled, list) {
+ if (eport->cookie == cookie && eport->ndev == ndev) {
+ list_del(&eport->list);
+ kfree(eport);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/* Find the VCAP instance that enabled the port using a specific filter */
+static struct vcap_admin *vcap_find_admin_by_cookie(struct vcap_control *vctrl,
+ unsigned long cookie)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->cookie == cookie)
+ return admin;
+
+ return NULL;
+}
+
+/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
+int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
+ int chain_id, unsigned long cookie, bool enable)
+{
+ struct vcap_admin *admin;
+ int err;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return err;
+
+ if (!ndev)
+ return -ENODEV;
+
+ if (chain_id)
+ admin = vcap_find_admin(vctrl, chain_id);
+ else
+ admin = vcap_find_admin_by_cookie(vctrl, cookie);
+ if (!admin)
+ return -ENOENT;
+
+	/* only the first instance and the first chain id can control lookups */
+ if (admin->vinst || chain_id > admin->first_cid)
+ return -EFAULT;
+
+ err = vctrl->ops->enable(ndev, admin, enable);
+ if (err)
+ return err;
+
+ if (chain_id) {
+ if (vcap_is_enabled(admin, ndev, cookie))
+ return -EADDRINUSE;
+ mutex_lock(&admin->lock);
+ vcap_enable(admin, ndev, cookie);
+ } else {
+ mutex_lock(&admin->lock);
+ vcap_disable(admin, ndev, cookie);
+ }
+ mutex_unlock(&admin->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_enable_lookups);
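+
+/* Illustrative only: a minimal sketch of client usage, assuming the client
+ * identifies the port binding with a cookie of its own choice:
+ *
+ *	err = vcap_enable_lookups(vctrl, ndev, chain_id, cookie, true);
+ *	...
+ *	err = vcap_enable_lookups(vctrl, ndev, 0, cookie, false);
+ *
+ * With chain id 0 the VCAP instance is found via the cookie and the port is
+ * removed from its list of enabled ports.
+ */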
+
+/* Set a rule counter id (for certain VCAPs only) */
+void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+
+ ri->counter_id = counter_id;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_set_counter_id);
+
+/* Provide all rules via a callback interface */
+int vcap_rule_iter(struct vcap_control *vctrl,
+ int (*callback)(void *, struct vcap_rule *), void *arg)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int ret;
+
+ ret = vcap_api_check(vctrl);
+ if (ret)
+ return ret;
+
+ /* Iterate all rules in each VCAP instance */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(ri, &admin->rules, list) {
+ ret = callback(arg, &ri->data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_iter);
+
+int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return err;
+ if (!ctr) {
+ pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ return vcap_write_counter(ri, ctr);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_set_counter);
+
+int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return err;
+ if (!ctr) {
+ pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ return vcap_read_counter(ri, ctr);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_get_counter);
+
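+/* Modify a key field in the rule, or add it if it is not present yet */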
+static int vcap_rule_mod_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ enum vcap_field_type ftype,
+ struct vcap_client_keyfield_data *data)
+{
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field)
+ return vcap_rule_add_key(rule, key, ftype, data);
+ vcap_copy_from_client_keyfield(rule, field, data);
+ return 0;
+}
+
+/* Modify a 32 bit key field with value and mask in the rule */
+int vcap_rule_mod_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask)
+{
+ struct vcap_client_keyfield_data data;
+
+ data.u32.value = value;
+ data.u32.mask = mask;
+ return vcap_rule_mod_key(rule, key, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_mod_key_u32);
+
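+/* Modify an action field in the rule, or add it if it is not present yet */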
+static int vcap_rule_mod_action(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ enum vcap_field_type ftype,
+ struct vcap_client_actionfield_data *data)
+{
+ struct vcap_client_actionfield *field;
+
+ field = vcap_find_actionfield(rule, action);
+ if (!field)
+ return vcap_rule_add_action(rule, action, ftype, data);
+ vcap_copy_from_client_actionfield(rule, field, data);
+ return 0;
+}
+
+/* Modify a 32 bit action field with value in the rule */
+int vcap_rule_mod_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value)
+{
+ struct vcap_client_actionfield_data data;
+
+ data.u32.value = value;
+ return vcap_rule_mod_action(rule, action, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_mod_action_u32);
+
+/* Remove the keys in the keylist from the rule, and optionally any keys
+ * that are not supported by the keyset
+ */
+int vcap_filter_rule_keys(struct vcap_rule *rule,
+ enum vcap_key_field keylist[], int length,
+ bool drop_unsupported)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *ckf, *next_ckf;
+ const struct vcap_field *fields;
+ enum vcap_key_field key;
+ int err = 0;
+ int idx;
+
+ if (length > 0) {
+ err = -EEXIST;
+ list_for_each_entry_safe(ckf, next_ckf,
+ &ri->data.keyfields, ctrl.list) {
+ key = ckf->ctrl.key;
+ for (idx = 0; idx < length; ++idx)
+ if (key == keylist[idx]) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ idx++;
+ err = 0;
+ }
+ }
+ }
+ if (drop_unsupported) {
+ err = -EEXIST;
+ fields = vcap_keyfields(ri->vctrl, ri->admin->vtype,
+ rule->keyset);
+ if (!fields)
+ return err;
+ list_for_each_entry_safe(ckf, next_ckf,
+ &ri->data.keyfields, ctrl.list) {
+ key = ckf->ctrl.key;
+ if (fields[key].width == 0) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ err = 0;
+ }
+ }
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_filter_rule_keys);
+
+/* Make a full copy of an existing rule with a new rule id */
+struct vcap_rule *vcap_copy_rule(struct vcap_rule *erule)
+{
+ struct vcap_rule_internal *ri = to_intrule(erule);
+ struct vcap_client_actionfield *caf;
+ struct vcap_client_keyfield *ckf;
+ struct vcap_rule *rule;
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return ERR_PTR(err);
+
+ rule = vcap_alloc_rule(ri->vctrl, ri->ndev, ri->data.vcap_chain_id,
+ ri->data.user, ri->data.priority, 0);
+ if (IS_ERR(rule))
+ return rule;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ /* Add a key duplicate in the new rule */
+ err = vcap_rule_add_key(rule,
+ ckf->ctrl.key,
+ ckf->ctrl.type,
+ &ckf->data);
+ if (err)
+ goto err;
+ }
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+		/* Add an action duplicate in the new rule */
+ err = vcap_rule_add_action(rule,
+ caf->ctrl.action,
+ caf->ctrl.type,
+ &caf->data);
+ if (err)
+ goto err;
+ }
+ return rule;
+err:
+ vcap_free_rule(rule);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(vcap_copy_rule);
+
+#ifdef CONFIG_VCAP_KUNIT_TEST
+#include "vcap_api_kunit.c"
+#endif
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
new file mode 100644
index 000000000000..689c7270f2a8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API__
+#define __VCAP_API__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+/* Use the generated API model */
+#include "vcap_ag_api.h"
+
+#define VCAP_CID_LOOKUP_SIZE 100000 /* Chains in a lookup */
+#define VCAP_CID_INGRESS_L0 1000000 /* Ingress Stage 1 Lookup 0 */
+#define VCAP_CID_INGRESS_L1 1100000 /* Ingress Stage 1 Lookup 1 */
+#define VCAP_CID_INGRESS_L2 1200000 /* Ingress Stage 1 Lookup 2 */
+#define VCAP_CID_INGRESS_L3 1300000 /* Ingress Stage 1 Lookup 3 */
+#define VCAP_CID_INGRESS_L4 1400000 /* Ingress Stage 1 Lookup 4 */
+#define VCAP_CID_INGRESS_L5 1500000 /* Ingress Stage 1 Lookup 5 */
+
+#define VCAP_CID_PREROUTING_IPV6 3000000 /* Prerouting Stage */
+#define VCAP_CID_PREROUTING 6000000 /* Prerouting Stage */
+
+#define VCAP_CID_INGRESS_STAGE2_L0 8000000 /* Ingress Stage 2 Lookup 0 */
+#define VCAP_CID_INGRESS_STAGE2_L1 8100000 /* Ingress Stage 2 Lookup 1 */
+#define VCAP_CID_INGRESS_STAGE2_L2 8200000 /* Ingress Stage 2 Lookup 2 */
+#define VCAP_CID_INGRESS_STAGE2_L3 8300000 /* Ingress Stage 2 Lookup 3 */
+
+#define VCAP_CID_EGRESS_L0 10000000 /* Egress Lookup 0 */
+#define VCAP_CID_EGRESS_L1 10100000 /* Egress Lookup 1 */
+
+#define VCAP_CID_EGRESS_STAGE2_L0 20000000 /* Egress Stage 2 Lookup 0 */
+#define VCAP_CID_EGRESS_STAGE2_L1 20100000 /* Egress Stage 2 Lookup 1 */
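+
+/* Each of the bases above starts a lookup covering VCAP_CID_LOOKUP_SIZE
+ * chain ids. As an illustration (the offset is a client choice), a rule
+ * attached to chain VCAP_CID_INGRESS_L1 + 1 belongs to ingress stage 1,
+ * lookup 1, while VCAP_CID_INGRESS_L2 starts the next lookup.
+ */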
+
+/* Known users of the VCAP API */
+enum vcap_user {
+ VCAP_USER_PTP,
+ VCAP_USER_MRP,
+ VCAP_USER_CFM,
+ VCAP_USER_VLAN,
+ VCAP_USER_QOS,
+ VCAP_USER_VCAP_UTIL,
+ VCAP_USER_TC,
+ VCAP_USER_TC_EXTRA,
+
+ /* add new users above here */
+
+ /* used to define VCAP_USER_MAX below */
+ __VCAP_USER_AFTER_LAST,
+ VCAP_USER_MAX = __VCAP_USER_AFTER_LAST - 1,
+};
+
+/* VCAP information used for displaying data */
+struct vcap_statistics {
+ char *name;
+ int count;
+ const char * const *keyfield_set_names;
+ const char * const *actionfield_set_names;
+ const char * const *keyfield_names;
+ const char * const *actionfield_names;
+};
+
+/* VCAP key/action field type, position and width */
+struct vcap_field {
+ u16 type;
+ u16 width;
+ u16 offset;
+};
+
+/* VCAP keyset or actionset type and width */
+struct vcap_set {
+ u8 type_id;
+ u8 sw_per_item;
+ u8 sw_cnt;
+};
+
+/* VCAP typegroup position and bitvalue */
+struct vcap_typegroup {
+ u16 offset;
+ u16 width;
+ u16 value;
+};
+
+/* VCAP model data */
+struct vcap_info {
+ char *name; /* user-friendly name */
+	u16 rows; /* number of rows in instance */
+ u16 sw_count; /* maximum subwords used per rule */
+ u16 sw_width; /* bits per subword in a keyset */
+ u16 sticky_width; /* sticky bits per rule */
+ u16 act_width; /* bits per subword in an actionset */
+ u16 default_cnt; /* number of default rules */
+ u16 require_cnt_dis; /* not used */
+ u16 version; /* vcap rtl version */
+ const struct vcap_set *keyfield_set; /* keysets */
+ int keyfield_set_size; /* number of keysets */
+ const struct vcap_set *actionfield_set; /* actionsets */
+ int actionfield_set_size; /* number of actionsets */
+ /* map of keys per keyset */
+ const struct vcap_field **keyfield_set_map;
+ /* number of entries in the above map */
+ int *keyfield_set_map_size;
+ /* map of actions per actionset */
+ const struct vcap_field **actionfield_set_map;
+ /* number of entries in the above map */
+ int *actionfield_set_map_size;
+ /* map of keyset typegroups per subword size */
+ const struct vcap_typegroup **keyfield_set_typegroups;
+ /* map of actionset typegroups per subword size */
+ const struct vcap_typegroup **actionfield_set_typegroups;
+};
+
+enum vcap_field_type {
+ VCAP_FIELD_BIT,
+ VCAP_FIELD_U32,
+ VCAP_FIELD_U48,
+ VCAP_FIELD_U56,
+ VCAP_FIELD_U64,
+ VCAP_FIELD_U72,
+ VCAP_FIELD_U112,
+ VCAP_FIELD_U128,
+};
+
+/* VCAP rule data towards the VCAP cache */
+struct vcap_cache_data {
+ u32 *keystream;
+ u32 *maskstream;
+ u32 *actionstream;
+ u32 counter;
+ bool sticky;
+};
+
+/* Selects which part of the rule must be updated */
+enum vcap_selection {
+ VCAP_SEL_ENTRY = 0x01,
+ VCAP_SEL_ACTION = 0x02,
+ VCAP_SEL_COUNTER = 0x04,
+ VCAP_SEL_ALL = 0xff,
+};
+
+/* Commands towards the VCAP cache */
+enum vcap_command {
+ VCAP_CMD_WRITE = 0,
+ VCAP_CMD_READ = 1,
+ VCAP_CMD_MOVE_DOWN = 2,
+ VCAP_CMD_MOVE_UP = 3,
+ VCAP_CMD_INITIALIZE = 4,
+};
+
+enum vcap_rule_error {
+ VCAP_ERR_NONE = 0, /* No known error */
+ VCAP_ERR_NO_ADMIN, /* No admin instance */
+ VCAP_ERR_NO_NETDEV, /* No netdev instance */
+ VCAP_ERR_NO_KEYSET_MATCH, /* No keyset matched the rule keys */
+ VCAP_ERR_NO_ACTIONSET_MATCH, /* No actionset matched the rule actions */
+ VCAP_ERR_NO_PORT_KEYSET_MATCH, /* No port keyset matched the rule keys */
+};
+
+/* Administration of each VCAP instance */
+struct vcap_admin {
+ struct list_head list; /* for insertion in vcap_control */
+ struct list_head rules; /* list of rules */
+ struct list_head enabled; /* list of enabled ports */
+ struct mutex lock; /* control access to rules */
+ enum vcap_type vtype; /* type of vcap */
+ int vinst; /* instance number within the same type */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int tgt_inst; /* hardware instance number */
+ int lookups; /* number of lookups in this vcap type */
+ int lookups_per_instance; /* number of lookups in this instance */
+ int last_valid_addr; /* top of address range to be used */
+ int first_valid_addr; /* bottom of address range to be used */
+ int last_used_addr; /* address of lowest added rule */
+ bool w32be; /* vcap uses "32bit-word big-endian" encoding */
+ struct vcap_cache_data cache; /* encoded rule data */
+};
+
+/* Client supplied VCAP rule data */
+struct vcap_rule {
+ int vcap_chain_id; /* chain used for this rule */
+ enum vcap_user user; /* rule owner */
+ u16 priority;
+ u32 id; /* vcap rule id, must be unique, 0 will auto-generate a value */
+ u64 cookie; /* used by the client to identify the rule */
+ struct list_head keyfields; /* list of vcap_client_keyfield */
+ struct list_head actionfields; /* list of vcap_client_actionfield */
+ enum vcap_keyfield_set keyset; /* keyset used: may be derived from fields */
+ enum vcap_actionfield_set actionset; /* actionset used: may be derived from fields */
+ enum vcap_rule_error exterr; /* extended error - used by TC */
+ u64 client; /* space for client defined data */
+};
+
+/* List of keysets */
+struct vcap_keyset_list {
+ int max; /* size of the keyset list */
+ int cnt; /* count of keysets actually in the list */
+ enum vcap_keyfield_set *keysets; /* the list of keysets */
+};
+
+/* Client output printf-like function with destination */
+struct vcap_output_print {
+ __printf(2, 3)
+ void (*prf)(void *out, const char *fmt, ...);
+ void *dst;
+};
+
+/* Client supplied VCAP callback operations */
+struct vcap_operations {
+ /* validate port keyset operation */
+ enum vcap_keyfield_set (*validate_keyset)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto);
+	/* add default rule fields for the selected keyset */
+ void (*add_default_fields)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule);
+ /* cache operations */
+ void (*cache_erase)
+ (struct vcap_admin *admin);
+ void (*cache_write)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 idx, u32 count);
+ void (*cache_read)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 idx,
+ u32 count);
+ /* block operations */
+ void (*init)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count);
+ void (*update)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel,
+ u32 addr);
+ void (*move)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ u32 addr,
+ int offset,
+ int count);
+ /* informational */
+ int (*port_info)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+ /* enable/disable the lookups in a vcap instance */
+ int (*enable)
+ (struct net_device *ndev,
+ struct vcap_admin *admin,
+ bool enable);
+};
+
+/* VCAP API Client control interface */
+struct vcap_control {
+ struct vcap_operations *ops; /* client supplied operations */
+ const struct vcap_info *vcaps; /* client supplied vcap models */
+ const struct vcap_statistics *stats; /* client supplied vcap stats */
+ struct list_head list; /* list of vcap instances */
+};
+
+/* Set client control interface on the API */
+int vcap_api_set_client(struct vcap_control *vctrl);
+
+#endif /* __VCAP_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
new file mode 100644
index 000000000000..0319866f9c94
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API_CLIENT__
+#define __VCAP_API_CLIENT__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+#include "vcap_api.h"
+
+/* Client supplied VCAP rule key control part */
+struct vcap_client_keyfield_ctrl {
+ struct list_head list; /* For insertion into a rule */
+ enum vcap_key_field key;
+ enum vcap_field_type type;
+};
+
+struct vcap_u1_key {
+ u8 value;
+ u8 mask;
+};
+
+struct vcap_u32_key {
+ u32 value;
+ u32 mask;
+};
+
+struct vcap_u48_key {
+ u8 value[6];
+ u8 mask[6];
+};
+
+struct vcap_u56_key {
+ u8 value[7];
+ u8 mask[7];
+};
+
+struct vcap_u64_key {
+ u8 value[8];
+ u8 mask[8];
+};
+
+struct vcap_u72_key {
+ u8 value[9];
+ u8 mask[9];
+};
+
+struct vcap_u112_key {
+ u8 value[14];
+ u8 mask[14];
+};
+
+struct vcap_u128_key {
+ u8 value[16];
+ u8 mask[16];
+};
+
+/* Client supplied VCAP rule field data */
+struct vcap_client_keyfield_data {
+ union {
+ struct vcap_u1_key u1;
+ struct vcap_u32_key u32;
+ struct vcap_u48_key u48;
+ struct vcap_u56_key u56;
+ struct vcap_u64_key u64;
+ struct vcap_u72_key u72;
+ struct vcap_u112_key u112;
+ struct vcap_u128_key u128;
+ };
+};
+
+/* Client supplied VCAP rule key (value, mask) */
+struct vcap_client_keyfield {
+ struct vcap_client_keyfield_ctrl ctrl;
+ struct vcap_client_keyfield_data data;
+};
+
+/* Client supplied VCAP rule action control part */
+struct vcap_client_actionfield_ctrl {
+ struct list_head list; /* For insertion into a rule */
+ enum vcap_action_field action;
+ enum vcap_field_type type;
+};
+
+struct vcap_u1_action {
+ u8 value;
+};
+
+struct vcap_u32_action {
+ u32 value;
+};
+
+struct vcap_u48_action {
+ u8 value[6];
+};
+
+struct vcap_u56_action {
+ u8 value[7];
+};
+
+struct vcap_u64_action {
+ u8 value[8];
+};
+
+struct vcap_u72_action {
+ u8 value[9];
+};
+
+struct vcap_u112_action {
+ u8 value[14];
+};
+
+struct vcap_u128_action {
+ u8 value[16];
+};
+
+struct vcap_client_actionfield_data {
+ union {
+ struct vcap_u1_action u1;
+ struct vcap_u32_action u32;
+ struct vcap_u48_action u48;
+ struct vcap_u56_action u56;
+ struct vcap_u64_action u64;
+ struct vcap_u72_action u72;
+ struct vcap_u112_action u112;
+ struct vcap_u128_action u128;
+ };
+};
+
+struct vcap_client_actionfield {
+ struct vcap_client_actionfield_ctrl ctrl;
+ struct vcap_client_actionfield_data data;
+};
+
+enum vcap_bit {
+ VCAP_BIT_ANY,
+ VCAP_BIT_0,
+ VCAP_BIT_1
+};
+
+struct vcap_counter {
+ u32 value;
+ bool sticky;
+};
+
+/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
+int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
+ int chain_id, unsigned long cookie, bool enable);
+
+/* VCAP rule operations */
+/* Allocate a rule and fill in the basic information */
+struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ int vcap_chain_id,
+ enum vcap_user user,
+ u16 priority,
+ u32 id);
+/* Free mem of a rule owned by client */
+void vcap_free_rule(struct vcap_rule *rule);
+/* Validate a rule before adding it to the VCAP */
+int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto);
+/* Add rule to a VCAP instance */
+int vcap_add_rule(struct vcap_rule *rule);
+/* Delete rule in a VCAP instance */
+int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id);
+/* Make a full copy of an existing rule with a new rule id */
+struct vcap_rule *vcap_copy_rule(struct vcap_rule *rule);
+/* Get rule from a VCAP instance */
+struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id);
+/* Update existing rule */
+int vcap_mod_rule(struct vcap_rule *rule);
+
+/* Update the keyset for the rule */
+int vcap_set_rule_set_keyset(struct vcap_rule *rule,
+ enum vcap_keyfield_set keyset);
+/* Update the actionset for the rule */
+int vcap_set_rule_set_actionset(struct vcap_rule *rule,
+ enum vcap_actionfield_set actionset);
+/* Set a rule counter id (for certain VCAPs only) */
+void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id);
+
+/* VCAP rule field operations */
+int vcap_rule_add_key_bit(struct vcap_rule *rule, enum vcap_key_field key,
+ enum vcap_bit val);
+int vcap_rule_add_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask);
+int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u48_key *fieldval);
+int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u72_key *fieldval);
+int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u128_key *fieldval);
+int vcap_rule_add_action_bit(struct vcap_rule *rule,
+ enum vcap_action_field action, enum vcap_bit val);
+int vcap_rule_add_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action, u32 value);
+
+/* VCAP rule counter operations */
+int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
+int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
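+
+/* Illustrative only: a minimal sketch of the typical rule lifecycle, assuming
+ * a client-chosen chain id, priority and l3 protocol; the key field ids come
+ * from the generated vcap_ag_api.h model and the action field id is left as
+ * a placeholder:
+ *
+ *	struct vcap_rule *rule;
+ *	int err;
+ *
+ *	rule = vcap_alloc_rule(vctrl, ndev, chain_id, VCAP_USER_TC, prio, 0);
+ *	if (IS_ERR(rule))
+ *		return PTR_ERR(rule);
+ *	vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+ *	vcap_rule_add_key_u32(rule, VCAP_KF_L3_IP4_SIP, sip, ~0);
+ *	vcap_rule_add_action_bit(rule, <action field id>, VCAP_BIT_1);
+ *	err = vcap_val_rule(rule, l3_proto);
+ *	if (!err)
+ *		err = vcap_add_rule(rule);
+ *	vcap_free_rule(rule);
+ */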
+
+/* VCAP lookup operations */
+/* Convert a chain id to a VCAP lookup index */
+int vcap_chain_id_to_lookup(struct vcap_admin *admin, int cur_cid);
+/* Lookup a vcap instance using chain id */
+struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid);
+/* Find information on a key field in a rule */
+const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
+ enum vcap_key_field key);
+/* Find a rule id with a provided cookie */
+int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie);
+/* Is the next chain id in the following lookup, possibly in another VCAP */
+bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
+/* Provide all rules via a callback interface */
+int vcap_rule_iter(struct vcap_control *vctrl,
+ int (*callback)(void *, struct vcap_rule *), void *arg);
+/* Match a list of keys against the keysets available in a vcap type */
+bool vcap_rule_find_keysets(struct vcap_rule *rule,
+ struct vcap_keyset_list *matches);
+/* Return the keyset information for the keyset */
+const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset);
+/* Copy to host byte order */
+void vcap_netbytes_copy(u8 *dst, u8 *src, int count);
+
+/* Convert validation error code into tc extack error message */
+void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule);
+
+/* Cleanup a VCAP instance */
+int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin);
+
+/* Add a keyset to a keyset list */
+bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
+ enum vcap_keyfield_set keyset);
+/* Remove the keys in the keylist from the rule, and optionally any keys
+ * that are not supported by the keyset
+ */
+int vcap_filter_rule_keys(struct vcap_rule *rule,
+ enum vcap_key_field keylist[], int length,
+ bool drop_unsupported);
+
+/* map keyset id to a string with the keyset name */
+const char *vcap_keyset_name(struct vcap_control *vctrl,
+ enum vcap_keyfield_set keyset);
+/* map key field id to a string with the key name */
+const char *vcap_keyfield_name(struct vcap_control *vctrl,
+ enum vcap_key_field key);
+
+/* Modify a 32 bit key field with value and mask in the rule */
+int vcap_rule_mod_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask);
+/* Modify a 32 bit action field with value in the rule */
+int vcap_rule_mod_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value);
+
+/* Get a 32 bit key field value and mask from the rule */
+int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 *value, u32 *mask);
+
+#endif /* __VCAP_API_CLIENT__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
new file mode 100644
index 000000000000..895bfff550d2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP API debug file system support
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ */
+
+#include "vcap_api_private.h"
+#include "vcap_api_debugfs.h"
+
+struct vcap_admin_debugfs_info {
+ struct vcap_control *vctrl;
+ struct vcap_admin *admin;
+};
+
+struct vcap_port_debugfs_info {
+ struct vcap_control *vctrl;
+ struct net_device *ndev;
+};
+
+/* Dump a keyfield value and mask */
+static void vcap_debugfs_show_rule_keyfield(struct vcap_control *vctrl,
+ struct vcap_output_print *out,
+ enum vcap_key_field key,
+ const struct vcap_field *keyfield,
+ struct vcap_client_keyfield_data *data)
+{
+ bool hex = false;
+ u8 *value, *mask;
+ int idx, bytes;
+
+ out->prf(out->dst, " %s: W%d: ", vcap_keyfield_name(vctrl, key),
+ keyfield[key].width);
+
+ switch (keyfield[key].type) {
+ case VCAP_FIELD_BIT:
+ out->prf(out->dst, "%d/%d", data->u1.value, data->u1.mask);
+ break;
+ case VCAP_FIELD_U32:
+ value = (u8 *)(&data->u32.value);
+ mask = (u8 *)(&data->u32.mask);
+
+ if (key == VCAP_KF_L3_IP4_SIP || key == VCAP_KF_L3_IP4_DIP) {
+ out->prf(out->dst, "%pI4h/%pI4h", &data->u32.value,
+ &data->u32.mask);
+ } else if (key == VCAP_KF_ETYPE ||
+ key == VCAP_KF_IF_IGR_PORT_MASK) {
+ hex = true;
+ } else {
+ u32 fmsk = (1 << keyfield[key].width) - 1;
+
+ out->prf(out->dst, "%u/%u", data->u32.value & fmsk,
+ data->u32.mask & fmsk);
+ }
+ break;
+ case VCAP_FIELD_U48:
+ value = data->u48.value;
+ mask = data->u48.mask;
+ if (key == VCAP_KF_L2_SMAC || key == VCAP_KF_L2_DMAC)
+ out->prf(out->dst, "%pMR/%pMR", data->u48.value,
+ data->u48.mask);
+ else
+ hex = true;
+ break;
+ case VCAP_FIELD_U56:
+ value = data->u56.value;
+ mask = data->u56.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U64:
+ value = data->u64.value;
+ mask = data->u64.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U72:
+ value = data->u72.value;
+ mask = data->u72.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U112:
+ value = data->u112.value;
+ mask = data->u112.mask;
+ hex = true;
+ break;
+ case VCAP_FIELD_U128:
+ if (key == VCAP_KF_L3_IP6_SIP || key == VCAP_KF_L3_IP6_DIP) {
+ u8 nvalue[16], nmask[16];
+
+ vcap_netbytes_copy(nvalue, data->u128.value,
+ sizeof(nvalue));
+ vcap_netbytes_copy(nmask, data->u128.mask,
+ sizeof(nmask));
+ out->prf(out->dst, "%pI6/%pI6", nvalue, nmask);
+ } else {
+ hex = true;
+ }
+ break;
+ }
+ if (hex) {
+ bytes = DIV_ROUND_UP(keyfield[key].width, BITS_PER_BYTE);
+ out->prf(out->dst, "0x");
+ for (idx = 0; idx < bytes; ++idx)
+ out->prf(out->dst, "%02x", value[bytes - idx - 1]);
+ out->prf(out->dst, "/0x");
+ for (idx = 0; idx < bytes; ++idx)
+ out->prf(out->dst, "%02x", mask[bytes - idx - 1]);
+ }
+ out->prf(out->dst, "\n");
+}
+
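+/* Dump an actionfield value */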
+static void
+vcap_debugfs_show_rule_actionfield(struct vcap_control *vctrl,
+ struct vcap_output_print *out,
+ enum vcap_action_field action,
+ const struct vcap_field *actionfield,
+ u8 *value)
+{
+ bool hex = false;
+ int idx, bytes;
+ u32 fmsk, val;
+
+ out->prf(out->dst, " %s: W%d: ",
+ vcap_actionfield_name(vctrl, action),
+ actionfield[action].width);
+
+ switch (actionfield[action].type) {
+ case VCAP_FIELD_BIT:
+ out->prf(out->dst, "%d", value[0]);
+ break;
+ case VCAP_FIELD_U32:
+ fmsk = (1 << actionfield[action].width) - 1;
+ val = *(u32 *)value;
+ out->prf(out->dst, "%u", val & fmsk);
+ break;
+ case VCAP_FIELD_U48:
+ case VCAP_FIELD_U56:
+ case VCAP_FIELD_U64:
+ case VCAP_FIELD_U72:
+ case VCAP_FIELD_U112:
+ case VCAP_FIELD_U128:
+ hex = true;
+ break;
+ }
+ if (hex) {
+ bytes = DIV_ROUND_UP(actionfield[action].width, BITS_PER_BYTE);
+ out->prf(out->dst, "0x");
+ for (idx = 0; idx < bytes; ++idx)
+ out->prf(out->dst, "%02x", value[bytes - idx - 1]);
+ }
+ out->prf(out->dst, "\n");
+}
+
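+/* Dump the matching keysets and the keyfields of a rule */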
+static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ enum vcap_keyfield_set keysets[10];
+ const struct vcap_field *keyfield;
+ enum vcap_type vt = admin->vtype;
+ struct vcap_client_keyfield *ckf;
+ struct vcap_keyset_list matches;
+ u32 *maskstream;
+ u32 *keystream;
+ int res;
+
+ keystream = admin->cache.keystream;
+ maskstream = admin->cache.maskstream;
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
+ res = vcap_find_keystream_keysets(vctrl, vt, keystream, maskstream,
+ false, 0, &matches);
+ if (res < 0) {
+ pr_err("%s:%d: could not find valid keysets: %d\n",
+ __func__, __LINE__, res);
+ return -EINVAL;
+ }
+ out->prf(out->dst, " keysets:");
+ for (int idx = 0; idx < matches.cnt; ++idx)
+ out->prf(out->dst, " %s",
+ vcap_keyset_name(vctrl, matches.keysets[idx]));
+ out->prf(out->dst, "\n");
+ out->prf(out->dst, " keyset_sw: %d\n", ri->keyset_sw);
+ out->prf(out->dst, " keyset_sw_regs: %d\n", ri->keyset_sw_regs);
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ keyfield = vcap_keyfields(vctrl, admin->vtype, ri->data.keyset);
+ vcap_debugfs_show_rule_keyfield(vctrl, out, ckf->ctrl.key,
+ keyfield, &ckf->data);
+ }
+
+ return 0;
+}
+
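+/* Dump the actionset and the actionfields of a rule */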
+static int vcap_debugfs_show_rule_actionset(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ const struct vcap_field *actionfield;
+ struct vcap_client_actionfield *caf;
+
+ out->prf(out->dst, " actionset: %s\n",
+ vcap_actionset_name(vctrl, ri->data.actionset));
+ out->prf(out->dst, " actionset_sw: %d\n", ri->actionset_sw);
+ out->prf(out->dst, " actionset_sw_regs: %d\n", ri->actionset_sw_regs);
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ actionfield = vcap_actionfields(vctrl, admin->vtype,
+ ri->data.actionset);
+ vcap_debugfs_show_rule_actionfield(vctrl, out, caf->ctrl.action,
+ actionfield,
+ &caf->data.u1.value);
+ }
+
+ return 0;
+}
+
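+/* Dump one rule: address range, counter state and the decoded fields */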
+static void vcap_show_admin_rule(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out,
+ struct vcap_rule_internal *ri)
+{
+ ri->counter.value = admin->cache.counter;
+ ri->counter.sticky = admin->cache.sticky;
+ out->prf(out->dst,
+ "rule: %u, addr: [%d,%d], X%d, ctr[%d]: %d, hit: %d\n",
+ ri->data.id, ri->addr, ri->addr + ri->size - 1, ri->size,
+ ri->counter_id, ri->counter.value, ri->counter.sticky);
+ out->prf(out->dst, " chain_id: %d\n", ri->data.vcap_chain_id);
+ out->prf(out->dst, " user: %d\n", ri->data.user);
+ out->prf(out->dst, " priority: %d\n", ri->data.priority);
+ vcap_debugfs_show_rule_keyset(ri, out);
+ vcap_debugfs_show_rule_actionset(ri, out);
+}
+
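+/* Dump the VCAP model parameters and the instance configuration */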
+static void vcap_show_admin_info(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ const struct vcap_info *vcap = &vctrl->vcaps[admin->vtype];
+
+ out->prf(out->dst, "name: %s\n", vcap->name);
+ out->prf(out->dst, "rows: %d\n", vcap->rows);
+ out->prf(out->dst, "sw_count: %d\n", vcap->sw_count);
+ out->prf(out->dst, "sw_width: %d\n", vcap->sw_width);
+ out->prf(out->dst, "sticky_width: %d\n", vcap->sticky_width);
+ out->prf(out->dst, "act_width: %d\n", vcap->act_width);
+ out->prf(out->dst, "default_cnt: %d\n", vcap->default_cnt);
+ out->prf(out->dst, "require_cnt_dis: %d\n", vcap->require_cnt_dis);
+ out->prf(out->dst, "version: %d\n", vcap->version);
+ out->prf(out->dst, "vtype: %d\n", admin->vtype);
+ out->prf(out->dst, "vinst: %d\n", admin->vinst);
+ out->prf(out->dst, "first_cid: %d\n", admin->first_cid);
+ out->prf(out->dst, "last_cid: %d\n", admin->last_cid);
+ out->prf(out->dst, "lookups: %d\n", admin->lookups);
+ out->prf(out->dst, "first_valid_addr: %d\n", admin->first_valid_addr);
+ out->prf(out->dst, "last_valid_addr: %d\n", admin->last_valid_addr);
+ out->prf(out->dst, "last_used_addr: %d\n", admin->last_used_addr);
+}
+
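+/* Dump the instance information followed by all rules in the instance */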
+static int vcap_show_admin(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct vcap_rule_internal *elem;
+ struct vcap_rule *vrule;
+ int ret = 0;
+
+ vcap_show_admin_info(vctrl, admin, out);
+ list_for_each_entry(elem, &admin->rules, list) {
+ vrule = vcap_get_rule(vctrl, elem->data.id);
+ if (IS_ERR_OR_NULL(vrule)) {
+ ret = PTR_ERR(vrule);
+ break;
+ }
+
+ out->prf(out->dst, "\n");
+ vcap_show_admin_rule(vctrl, admin, out, to_intrule(vrule));
+ vcap_free_rule(vrule);
+ }
+ return ret;
+}
+
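+/* Scan the address range and report the keysets found at each rule start */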
+static int vcap_show_admin_raw(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ enum vcap_keyfield_set keysets[10];
+ enum vcap_type vt = admin->vtype;
+ struct vcap_keyset_list kslist;
+ struct vcap_rule_internal *ri;
+ const struct vcap_set *info;
+ int addr, idx;
+ int ret;
+
+ if (list_empty(&admin->rules))
+ return 0;
+
+ ret = vcap_api_check(vctrl);
+ if (ret)
+ return ret;
+
+ ri = list_first_entry(&admin->rules, struct vcap_rule_internal, list);
+
+ /* Go from higher to lower addresses searching for a keyset */
+ kslist.keysets = keysets;
+ kslist.max = ARRAY_SIZE(keysets);
+ for (addr = admin->last_valid_addr; addr >= admin->first_valid_addr;
+ --addr) {
+ kslist.cnt = 0;
+ ret = vcap_addr_keysets(vctrl, ri->ndev, admin, addr, &kslist);
+ if (ret < 0)
+ continue;
+ info = vcap_keyfieldset(vctrl, vt, kslist.keysets[0]);
+ if (!info)
+ continue;
+ if (addr % info->sw_per_item) {
+ pr_info("addr: %d X%d error rule, keyset: %s\n",
+ addr,
+ info->sw_per_item,
+ vcap_keyset_name(vctrl, kslist.keysets[0]));
+ } else {
+ out->prf(out->dst, " addr: %d, X%d rule, keysets:",
+ addr,
+ info->sw_per_item);
+ for (idx = 0; idx < kslist.cnt; ++idx)
+ out->prf(out->dst, " %s",
+ vcap_keyset_name(vctrl,
+ kslist.keysets[idx]));
+ out->prf(out->dst, "\n");
+ }
+ }
+ return 0;
+}
+
+/* Show the port configuration and status */
+static int vcap_port_debugfs_show(struct seq_file *m, void *unused)
+{
+ struct vcap_port_debugfs_info *info = m->private;
+ struct vcap_admin *admin;
+ struct vcap_output_print out = {
+ .prf = (void *)seq_printf,
+ .dst = m,
+ };
+
+ list_for_each_entry(admin, &info->vctrl->list, list) {
+ if (admin->vinst)
+ continue;
+ info->vctrl->ops->port_info(info->ndev, admin, &out);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vcap_port_debugfs);
+
+void vcap_port_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl,
+ struct net_device *ndev)
+{
+ struct vcap_port_debugfs_info *info;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ info->vctrl = vctrl;
+ info->ndev = ndev;
+ debugfs_create_file(netdev_name(ndev), 0444, parent, info,
+ &vcap_port_debugfs_fops);
+}
+EXPORT_SYMBOL_GPL(vcap_port_debugfs);
+
+/* Show the full VCAP instance data (rules with all fields) */
+static int vcap_debugfs_show(struct seq_file *m, void *unused)
+{
+ struct vcap_admin_debugfs_info *info = m->private;
+ struct vcap_output_print out = {
+ .prf = (void *)seq_printf,
+ .dst = m,
+ };
+
+ return vcap_show_admin(info->vctrl, info->admin, &out);
+}
+DEFINE_SHOW_ATTRIBUTE(vcap_debugfs);
+
+/* Show the raw VCAP instance data (rules with address info) */
+static int vcap_raw_debugfs_show(struct seq_file *m, void *unused)
+{
+ struct vcap_admin_debugfs_info *info = m->private;
+ struct vcap_output_print out = {
+ .prf = (void *)seq_printf,
+ .dst = m,
+ };
+
+ return vcap_show_admin_raw(info->vctrl, info->admin, &out);
+}
+DEFINE_SHOW_ATTRIBUTE(vcap_raw_debugfs);
+
+struct dentry *vcap_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl)
+{
+ struct vcap_admin_debugfs_info *info;
+ struct vcap_admin *admin;
+ struct dentry *dir;
+ char name[50];
+
+ dir = debugfs_create_dir("vcaps", parent);
+ if (PTR_ERR_OR_ZERO(dir))
+ return NULL;
+ list_for_each_entry(admin, &vctrl->list, list) {
+ sprintf(name, "raw_%s_%d", vctrl->vcaps[admin->vtype].name,
+ admin->vinst);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+ info->vctrl = vctrl;
+ info->admin = admin;
+ debugfs_create_file(name, 0444, dir, info,
+ &vcap_raw_debugfs_fops);
+ sprintf(name, "%s_%d", vctrl->vcaps[admin->vtype].name,
+ admin->vinst);
+ debugfs_create_file(name, 0444, dir, info, &vcap_debugfs_fops);
+ }
+ return dir;
+}
+EXPORT_SYMBOL_GPL(vcap_debugfs);
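+
+/* Illustrative only: a sketch of how a driver might wire this up at probe
+ * time; the debugfs root and the per-port loop are assumptions made for
+ * the example:
+ *
+ *	struct dentry *vcap_dir;
+ *
+ *	vcap_dir = vcap_debugfs(dev, driver_debugfs_root, vctrl);
+ *	for each port netdev ndev:
+ *		vcap_port_debugfs(dev, vcap_dir, vctrl, ndev);
+ */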
+
+#ifdef CONFIG_VCAP_KUNIT_TEST
+#include "vcap_api_debugfs_kunit.c"
+#endif
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h
new file mode 100644
index 000000000000..9f2c59b5f6f5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API_DEBUGFS__
+#define __VCAP_API_DEBUGFS__
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+
+#include "vcap_api.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
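+/* Create a debugfs entry showing the port VCAP configuration and status */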
+void vcap_port_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl,
+ struct net_device *ndev);
+
+/* Create a debugfs entry for a vcap instance */
+struct dentry *vcap_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl);
+
+#else
+
+static inline void vcap_port_debugfs(struct device *dev, struct dentry *parent,
+ struct vcap_control *vctrl,
+ struct net_device *ndev)
+{
+}
+
+static inline struct dentry *vcap_debugfs(struct device *dev,
+ struct dentry *parent,
+ struct vcap_control *vctrl)
+{
+ return NULL;
+}
+
+#endif
+#endif /* __VCAP_API_DEBUGFS__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
new file mode 100644
index 000000000000..cf594668d5d9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API kunit test suite
+ */
+
+#include <kunit/test.h>
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_api_debugfs.h"
+#include "vcap_model_kunit.h"
+
+/* First we have the test infrastructure that emulates the platform
+ * implementation
+ */
+#define TEST_BUF_CNT 100
+#define TEST_BUF_SZ 350
+#define STREAMWSIZE 64
+
+static u32 test_updateaddr[STREAMWSIZE] = {};
+static int test_updateaddridx;
+static int test_cache_erase_count;
+static u32 test_init_start;
+static u32 test_init_count;
+static u32 test_hw_counter_id;
+static struct vcap_cache_data test_hw_cache;
+static struct net_device test_netdev = {};
+static int test_move_addr;
+static int test_move_offset;
+static int test_move_count;
+static char test_pr_buffer[TEST_BUF_CNT][TEST_BUF_SZ];
+static int test_pr_bufferidx;
+static int test_pr_idx;
+
+/* Callback used by the VCAP API */
+static enum vcap_keyfield_set test_val_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ int idx;
+
+ if (kslist->cnt > 0) {
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_ETAG)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] ==
+ VCAP_KFS_PURE_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] ==
+ VCAP_KFS_NORMAL_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] ==
+ VCAP_KFS_NORMAL_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_MAC_ETYPE)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_ARP)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_IP_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ default:
+ pr_info("%s:%d: no validation for VCAP %d\n",
+ __func__, __LINE__, admin->vtype);
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Callback used by the VCAP API */
+static void test_add_def_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ if (admin->vinst == 0 || admin->vinst == 2)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_erase(struct vcap_admin *admin)
+{
+ if (test_cache_erase_count) {
+ memset(admin->cache.keystream, 0, test_cache_erase_count);
+ memset(admin->cache.maskstream, 0, test_cache_erase_count);
+ memset(admin->cache.actionstream, 0, test_cache_erase_count);
+ test_cache_erase_count = 0;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_init(struct net_device *ndev, struct vcap_admin *admin,
+ u32 start, u32 count)
+{
+ test_init_start = start;
+ test_init_count = count;
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_read(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ pr_debug("%s:%d: %d %d\n", __func__, __LINE__, start, count);
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before decoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ admin->cache.counter = test_hw_cache.counter;
+ admin->cache.sticky = test_hw_cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ break;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_write(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before encoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ test_hw_cache.counter = admin->cache.counter;
+ test_hw_cache.sticky = admin->cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_update(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ if (test_updateaddridx < ARRAY_SIZE(test_updateaddr))
+ test_updateaddr[test_updateaddridx] = addr;
+ else
+ pr_err("%s:%d: overflow: %d\n", __func__, __LINE__,
+ test_updateaddridx);
+ test_updateaddridx++;
+}
+
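+/* Callback used by the VCAP API */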
+static void test_cache_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ test_move_addr = addr;
+ test_move_offset = offset;
+ test_move_count = count;
+}
+
+/* Provide port information via a callback interface */
+static int vcap_test_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+
+static int vcap_test_enable(struct net_device *ndev,
+ struct vcap_admin *admin,
+ bool enable)
+{
+ return 0;
+}
+
+static struct vcap_operations test_callbacks = {
+ .validate_keyset = test_val_keyset,
+ .add_default_fields = test_add_def_fields,
+ .cache_erase = test_cache_erase,
+ .cache_write = test_cache_write,
+ .cache_read = test_cache_read,
+ .init = test_cache_init,
+ .update = test_cache_update,
+ .move = test_cache_move,
+ .port_info = vcap_test_port_info,
+ .enable = vcap_test_enable,
+};
+
+static struct vcap_control test_vctrl = {
+ .vcaps = kunit_test_vcaps,
+ .stats = &kunit_test_vcap_stats,
+ .ops = &test_callbacks,
+};
+
+static void vcap_test_api_init(struct vcap_admin *admin)
+{
+ /* Initialize the shared objects */
+ INIT_LIST_HEAD(&test_vctrl.list);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ list_add_tail(&admin->list, &test_vctrl.list);
+ memset(test_updateaddr, 0, sizeof(test_updateaddr));
+ test_updateaddridx = 0;
+ test_pr_bufferidx = 0;
+ test_pr_idx = 0;
+}
+
+/* Callback used by the show_admin function */
+static __printf(2, 3)
+int test_prf(void *out, const char *fmt, ...)
+{
+ static char test_buffer[TEST_BUF_SZ];
+ va_list args;
+ int idx, cnt;
+
+ if (test_pr_bufferidx >= TEST_BUF_CNT) {
+ pr_err("%s:%d: overflow: %d\n", __func__, __LINE__,
+ test_pr_bufferidx);
+ return 0;
+ }
+
+ va_start(args, fmt);
+ cnt = vscnprintf(test_buffer, TEST_BUF_SZ, fmt, args);
+ va_end(args);
+
+ for (idx = 0; idx < cnt; ++idx) {
+ test_pr_buffer[test_pr_bufferidx][test_pr_idx] =
+ test_buffer[idx];
+ if (test_buffer[idx] == '\n') {
+ test_pr_buffer[test_pr_bufferidx][++test_pr_idx] = 0;
+ test_pr_idx = 0;
+ test_pr_bufferidx++;
+ } else {
+ ++test_pr_idx;
+ }
+ }
+
+ return cnt;
+}
+
+/* Define the test cases. */
+
+static void vcap_api_addr_keyset_test(struct kunit *test)
+{
+ u32 keydata[12] = {
+ 0x40450042, 0x000feaf3, 0x00000003, 0x00050600,
+ 0x10203040, 0x00075880, 0x633c6864, 0x00040003,
+ 0x00000020, 0x00000008, 0x00000240, 0x00000000,
+ };
+ u32 mskdata[12] = {
+ 0x0030ff80, 0xfff00000, 0xfffffffc, 0xfff000ff,
+ 0x00000000, 0xfff00000, 0x00000000, 0xfff3fffc,
+ 0xffffffc0, 0xffffffff, 0xfffffc03, 0xffffffff,
+ };
+ u32 actdata[12] = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ enum vcap_keyfield_set keysets[10];
+ struct vcap_keyset_list matches;
+ int ret, idx, addr;
+
+ vcap_test_api_init(&admin);
+
+ /* Go from higher to lower addresses searching for a keyset */
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
+ for (idx = ARRAY_SIZE(keydata) - 1, addr = 799; idx > 0;
+ --idx, --addr) {
+ admin.cache.keystream = &keydata[idx];
+ admin.cache.maskstream = &mskdata[idx];
+ ret = vcap_addr_keysets(&test_vctrl, &test_netdev, &admin,
+ addr, &matches);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+ }
+
+ /* Finally we hit the start of the rule */
+ admin.cache.keystream = &keydata[idx];
+ admin.cache.maskstream = &mskdata[idx];
+ matches.cnt = 0;
+ ret = vcap_addr_keysets(&test_vctrl, &test_netdev, &admin,
+ addr, &matches);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, matches.cnt, 1);
+ KUNIT_EXPECT_EQ(test, matches.keysets[0], VCAP_KFS_MAC_ETYPE);
+}
+
+static void vcap_api_show_admin_raw_test(struct kunit *test)
+{
+ u32 keydata[4] = {
+ 0x40450042, 0x000feaf3, 0x00000003, 0x00050600,
+ };
+ u32 mskdata[4] = {
+ 0x0030ff80, 0xfff00000, 0xfffffffc, 0xfff000ff,
+ };
+ u32 actdata[12] = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ .first_valid_addr = 786,
+ .last_valid_addr = 788,
+ };
+ struct vcap_rule_internal ri = {
+ .ndev = &test_netdev,
+ };
+ struct vcap_output_print out = {
+ .prf = (void *)test_prf,
+ };
+ const char *test_expected =
+ " addr: 786, X6 rule, keysets: VCAP_KFS_MAC_ETYPE\n";
+ int ret;
+
+ vcap_test_api_init(&admin);
+ list_add_tail(&ri.list, &admin.rules);
+
+ ret = vcap_show_admin_raw(&test_vctrl, &admin, &out);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_STREQ(test, test_expected, test_pr_buffer[0]);
+}
+
+static const char * const test_admin_info_expect[] = {
+ "name: is2\n",
+ "rows: 256\n",
+ "sw_count: 12\n",
+ "sw_width: 52\n",
+ "sticky_width: 1\n",
+ "act_width: 110\n",
+ "default_cnt: 73\n",
+ "require_cnt_dis: 0\n",
+ "version: 1\n",
+ "vtype: 2\n",
+ "vinst: 0\n",
+ "first_cid: 10000\n",
+ "last_cid: 19999\n",
+ "lookups: 4\n",
+ "first_valid_addr: 0\n",
+ "last_valid_addr: 3071\n",
+ "last_used_addr: 794\n",
+};
+
+static void vcap_api_show_admin_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 794,
+ };
+ struct vcap_output_print out = {
+ .prf = (void *)test_prf,
+ };
+ int idx;
+
+ vcap_test_api_init(&admin);
+
+ vcap_show_admin_info(&test_vctrl, &admin, &out);
+ for (idx = 0; idx < test_pr_bufferidx; ++idx) {
+ /* pr_info("log[%02d]: %s", idx, test_pr_buffer[idx]); */
+ KUNIT_EXPECT_STREQ(test, test_admin_info_expect[idx],
+ test_pr_buffer[idx]);
+ }
+}
+
+static const char * const test_admin_expect[] = {
+ "name: is2\n",
+ "rows: 256\n",
+ "sw_count: 12\n",
+ "sw_width: 52\n",
+ "sticky_width: 1\n",
+ "act_width: 110\n",
+ "default_cnt: 73\n",
+ "require_cnt_dis: 0\n",
+ "version: 1\n",
+ "vtype: 2\n",
+ "vinst: 0\n",
+ "first_cid: 8000000\n",
+ "last_cid: 8199999\n",
+ "lookups: 4\n",
+ "first_valid_addr: 0\n",
+ "last_valid_addr: 3071\n",
+ "last_used_addr: 794\n",
+ "\n",
+ "rule: 100, addr: [794,799], X6, ctr[0]: 0, hit: 0\n",
+ " chain_id: 0\n",
+ " user: 0\n",
+ " priority: 0\n",
+ " keysets: VCAP_KFS_MAC_ETYPE\n",
+ " keyset_sw: 6\n",
+ " keyset_sw_regs: 2\n",
+ " ETYPE_LEN_IS: W1: 1/1\n",
+ " IF_IGR_PORT_MASK: W32: 0xffabcd01/0xffffffff\n",
+ " IF_IGR_PORT_MASK_RNG: W4: 5/15\n",
+ " L2_DMAC: W48: 01:02:03:04:05:06/ff:ff:ff:ff:ff:ff\n",
+ " L2_PAYLOAD_ETYPE: W64: 0x9000002000000081/0xff000000000000ff\n",
+ " L2_SMAC: W48: b1:9e:34:32:75:88/ff:ff:ff:ff:ff:ff\n",
+ " LOOKUP_FIRST_IS: W1: 1/1\n",
+ " TYPE: W4: 0/15\n",
+ " actionset: VCAP_AFS_BASE_TYPE\n",
+ " actionset_sw: 3\n",
+ " actionset_sw_regs: 4\n",
+ " CNT_ID: W12: 100\n",
+ " MATCH_ID: W16: 1\n",
+ " MATCH_ID_MASK: W16: 1\n",
+ " POLICE_ENA: W1: 1\n",
+ " PORT_MASK: W68: 0x0514670115f3324589\n",
+};
+
+static void vcap_api_show_admin_rule_test(struct kunit *test)
+{
+ u32 keydata[] = {
+ 0x40450042, 0x000feaf3, 0x00000003, 0x00050600,
+ 0x10203040, 0x00075880, 0x633c6864, 0x00040003,
+ 0x00000020, 0x00000008, 0x00000240, 0x00000000,
+ };
+ u32 mskdata[] = {
+ 0x0030ff80, 0xfff00000, 0xfffffffc, 0xfff000ff,
+ 0x00000000, 0xfff00000, 0x00000000, 0xfff3fffc,
+ 0xffffffc0, 0xffffffff, 0xfffffc03, 0xffffffff,
+ };
+ u32 actdata[] = {
+ 0x00040002, 0xf3324589, 0x14670115, 0x00000005,
+ 0x00000000, 0x00100000, 0x06400010, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ };
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .first_cid = 8000000,
+ .last_cid = 8199999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 794,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .data = {
+ .id = 100,
+ .keyset = VCAP_KFS_MAC_ETYPE,
+ .actionset = VCAP_AFS_BASE_TYPE,
+ },
+ .size = 6,
+ .keyset_sw = 6,
+ .keyset_sw_regs = 2,
+ .actionset_sw = 3,
+ .actionset_sw_regs = 4,
+ .addr = 794,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_output_print out = {
+ .prf = (void *)test_prf,
+ };
+ int ret, idx;
+
+ vcap_test_api_init(&admin);
+ list_add_tail(&ri.list, &admin.rules);
+
+ ret = vcap_show_admin(&test_vctrl, &admin, &out);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ for (idx = 0; idx < test_pr_bufferidx; ++idx) {
+ /* pr_info("log[%02d]: %s", idx, test_pr_buffer[idx]); */
+ KUNIT_EXPECT_STREQ(test, test_admin_expect[idx],
+ test_pr_buffer[idx]);
+ }
+}
+
+static struct kunit_case vcap_api_debugfs_test_cases[] = {
+ KUNIT_CASE(vcap_api_addr_keyset_test),
+ KUNIT_CASE(vcap_api_show_admin_raw_test),
+ KUNIT_CASE(vcap_api_show_admin_test),
+ KUNIT_CASE(vcap_api_show_admin_rule_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_debugfs_test_suite = {
+ .name = "VCAP_API_DebugFS_Testsuite",
+ .test_cases = vcap_api_debugfs_test_cases,
+};
+
+kunit_test_suite(vcap_api_debugfs_test_suite);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
new file mode 100644
index 000000000000..76a31215ebfb
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -0,0 +1,2245 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API kunit test suite
+ */
+
+#include <kunit/test.h>
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_model_kunit.h"
+
+/* First we have the test infrastructure that emulates the platform
+ * implementation
+ */
+#define TEST_BUF_CNT 100
+#define TEST_BUF_SZ 350
+#define STREAMWSIZE 64
+
+static u32 test_updateaddr[STREAMWSIZE] = {};
+static int test_updateaddridx;
+static int test_cache_erase_count;
+static u32 test_init_start;
+static u32 test_init_count;
+static u32 test_hw_counter_id;
+static struct vcap_cache_data test_hw_cache;
+static struct net_device test_netdev = {};
+static int test_move_addr;
+static int test_move_offset;
+static int test_move_count;
+
+/* Callback used by the VCAP API */
+static enum vcap_keyfield_set test_val_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ int idx;
+
+ if (kslist->cnt > 0) {
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_ETAG)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_PURE_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_NORMAL_5TUPLE_IP4)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_NORMAL_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ for (idx = 0; idx < kslist->cnt; idx++) {
+ if (kslist->keysets[idx] == VCAP_KFS_MAC_ETYPE)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_ARP)
+ return kslist->keysets[idx];
+ if (kslist->keysets[idx] == VCAP_KFS_IP_7TUPLE)
+ return kslist->keysets[idx];
+ }
+ break;
+ default:
+ pr_info("%s:%d: no validation for VCAP %d\n",
+ __func__, __LINE__, admin->vtype);
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Callback used by the VCAP API */
+static void test_add_def_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ if (admin->vinst == 0 || admin->vinst == 2)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_erase(struct vcap_admin *admin)
+{
+ if (test_cache_erase_count) {
+ memset(admin->cache.keystream, 0, test_cache_erase_count);
+ memset(admin->cache.maskstream, 0, test_cache_erase_count);
+ memset(admin->cache.actionstream, 0, test_cache_erase_count);
+ test_cache_erase_count = 0;
+ }
+}
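+
+/* The emulated erase is a no-op unless a test has primed
+ * test_cache_erase_count with the number of bytes to clear from each stream.
+ */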
+
+/* Callback used by the VCAP API */
+static void test_cache_init(struct net_device *ndev, struct vcap_admin *admin,
+ u32 start, u32 count)
+{
+ test_init_start = start;
+ test_init_count = count;
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_read(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ pr_debug("%s:%d: %d %d\n", __func__, __LINE__, start, count);
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before decoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ admin->cache.counter = test_hw_cache.counter;
+ admin->cache.sticky = test_hw_cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ break;
+ }
+}
+
+/* Callback used by the VCAP API */
+static void test_cache_write(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_selection sel, u32 start, u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: keydata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, keystr[idx]);
+ }
+ for (idx = 0; idx < count; ++idx) {
+ /* Invert the mask before encoding starts */
+ mskstr[idx] = ~mskstr[idx];
+ pr_debug("%s:%d: mskdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, mskstr[idx]);
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ actstr = &admin->cache.actionstream[start];
+ for (idx = 0; idx < count; ++idx) {
+ pr_debug("%s:%d: actdata[%02d]: 0x%08x\n", __func__,
+ __LINE__, start + idx, actstr[idx]);
+ }
+ break;
+ case VCAP_SEL_COUNTER:
+ pr_debug("%s:%d\n", __func__, __LINE__);
+ test_hw_counter_id = start;
+ test_hw_cache.counter = admin->cache.counter;
+ test_hw_cache.sticky = admin->cache.sticky;
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ }
+}
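+
+/* Note: both emulated accessors above invert the mask words in place, so the
+ * mask polarity held in the "hardware" stream is the inverse of the polarity
+ * used by the encoder and decoder.
+ */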
+
+/* Callback used by the VCAP API */
+static void test_cache_update(struct net_device *ndev, struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ if (test_updateaddridx < ARRAY_SIZE(test_updateaddr))
+ test_updateaddr[test_updateaddridx] = addr;
+ else
+ pr_err("%s:%d: overflow: %d\n", __func__, __LINE__, test_updateaddridx);
+ test_updateaddridx++;
+}
+
+static void test_cache_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ test_move_addr = addr;
+ test_move_offset = offset;
+ test_move_count = count;
+}
+
+/* Provide port information via a callback interface */
+static int vcap_test_port_info(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+
+static int vcap_test_enable(struct net_device *ndev,
+ struct vcap_admin *admin,
+ bool enable)
+{
+ return 0;
+}
+
+static struct vcap_operations test_callbacks = {
+ .validate_keyset = test_val_keyset,
+ .add_default_fields = test_add_def_fields,
+ .cache_erase = test_cache_erase,
+ .cache_write = test_cache_write,
+ .cache_read = test_cache_read,
+ .init = test_cache_init,
+ .update = test_cache_update,
+ .move = test_cache_move,
+ .port_info = vcap_test_port_info,
+ .enable = vcap_test_enable,
+};
+
+static struct vcap_control test_vctrl = {
+ .vcaps = kunit_test_vcaps,
+ .stats = &kunit_test_vcap_stats,
+ .ops = &test_callbacks,
+};
+
+static void vcap_test_api_init(struct vcap_admin *admin)
+{
+ /* Initialize the shared objects */
+ INIT_LIST_HEAD(&test_vctrl.list);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ list_add_tail(&admin->list, &test_vctrl.list);
+ memset(test_updateaddr, 0, sizeof(test_updateaddr));
+ test_updateaddridx = 0;
+}
+
+/* Helper function to create a rule of a specific size */
+static struct vcap_rule *
+test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
+ u16 priority,
+ int id, int size, int expected_addr)
+{
+ struct vcap_rule *rule;
+ struct vcap_rule_internal *ri;
+ enum vcap_keyfield_set keyset = VCAP_KFS_NO_VALUE;
+ enum vcap_actionfield_set actionset = VCAP_AFS_NO_VALUE;
+ int ret;
+
+ /* Reset the test bookkeeping before creating the rule */
+ memset(test_updateaddr, 0, sizeof(test_updateaddr));
+ test_updateaddridx = 0;
+ test_move_addr = 0;
+ test_move_offset = 0;
+ test_move_count = 0;
+
+ switch (size) {
+ case 2:
+ keyset = VCAP_KFS_ETAG;
+ actionset = VCAP_AFS_CLASS_REDUCED;
+ break;
+ case 3:
+ keyset = VCAP_KFS_PURE_5TUPLE_IP4;
+ actionset = VCAP_AFS_CLASSIFICATION;
+ break;
+ case 6:
+ keyset = VCAP_KFS_NORMAL_5TUPLE_IP4;
+ actionset = VCAP_AFS_CLASSIFICATION;
+ break;
+ case 12:
+ keyset = VCAP_KFS_NORMAL_7TUPLE;
+ actionset = VCAP_AFS_FULL;
+ break;
+ default:
+ break;
+ }
+
+ /* Check that a valid size was used */
+ KUNIT_ASSERT_NE(test, VCAP_KFS_NO_VALUE, keyset);
+
+ /* Allocate the rule */
+ rule = vcap_alloc_rule(&test_vctrl, &test_netdev, cid, user, priority,
+ id);
+ KUNIT_EXPECT_PTR_NE(test, NULL, rule);
+
+ ri = (struct vcap_rule_internal *)rule;
+
+ /* Override rule keyset */
+ ret = vcap_set_rule_set_keyset(rule, keyset);
+
+ /* Add rule actions: there must be at least one action */
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_COSID_VAL, 0);
+
+ /* Override rule actionset */
+ ret = vcap_set_rule_set_actionset(rule, actionset);
+
+ ret = vcap_val_rule(rule, ETH_P_ALL);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, keyset, rule->keyset);
+ KUNIT_EXPECT_EQ(test, actionset, rule->actionset);
+ KUNIT_EXPECT_EQ(test, size, ri->size);
+
+ /* Add rule with write callback */
+ ret = vcap_add_rule(rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, expected_addr, ri->addr);
+ return rule;
+}
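+
+/* The helper above maps the requested rule size to a matching
+ * keyset/actionset pair (X2, X3, X6 or X12), allocates and validates the
+ * rule, adds it through the write callbacks, and checks that it was placed
+ * at the expected VCAP address.
+ */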
+
+/* Prepare testing rule deletion */
+static void test_init_rule_deletion(void)
+{
+ test_move_addr = 0;
+ test_move_offset = 0;
+ test_move_count = 0;
+ test_init_start = 0;
+ test_init_count = 0;
+}
+
+/* Define the test cases. */
+
+static void vcap_api_set_bit_1_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter = {
+ .offset = 35,
+ .sw_width = 52,
+ .reg_idx = 1,
+ .reg_bitpos = 20,
+ .tg = NULL,
+ };
+ u32 stream[2] = {0};
+
+ vcap_set_bit(stream, &iter, 1);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)BIT(20), stream[1]);
+}
+
+static void vcap_api_set_bit_0_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter = {
+ .offset = 35,
+ .sw_width = 52,
+ .reg_idx = 2,
+ .reg_bitpos = 11,
+ .tg = NULL,
+ };
+ u32 stream[3] = {~0, ~0, ~0};
+
+ vcap_set_bit(stream, &iter, 0);
+
+ KUNIT_EXPECT_EQ(test, (u32)~0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)~0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)~BIT(11), stream[2]);
+}
+
+static void vcap_api_iterator_init_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 156, .width = 1, .value = 0, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ struct vcap_typegroup typegroups2[] = {
+ { .offset = 0, .width = 3, .value = 4, },
+ { .offset = 49, .width = 2, .value = 0, },
+ { .offset = 98, .width = 2, .value = 0, },
+ };
+
+ vcap_iter_init(&iter, 52, typegroups, 86);
+
+ KUNIT_EXPECT_EQ(test, 52, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 86 + 2, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 4, iter.reg_bitpos);
+
+ vcap_iter_init(&iter, 49, typegroups2, 134);
+
+ KUNIT_EXPECT_EQ(test, 49, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 134 + 7, iter.offset);
+ KUNIT_EXPECT_EQ(test, 5, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos);
+}
+
+static void vcap_api_iterator_next_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 0, },
+ { .offset = 98, .width = 2, .value = 0, },
+ { .offset = 147, .width = 3, .value = 0, },
+ { .offset = 196, .width = 2, .value = 0, },
+ { .offset = 245, .width = 1, .value = 0, },
+ };
+ int idx;
+
+ vcap_iter_init(&iter, 49, typegroups, 86);
+
+ KUNIT_EXPECT_EQ(test, 49, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 86 + 5, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos);
+
+ vcap_iter_next(&iter);
+
+ KUNIT_EXPECT_EQ(test, 91 + 1, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos);
+
+ for (idx = 0; idx < 6; idx++)
+ vcap_iter_next(&iter);
+
+ KUNIT_EXPECT_EQ(test, 92 + 6 + 2, iter.offset);
+ KUNIT_EXPECT_EQ(test, 4, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 2, iter.reg_bitpos);
+}
+
+static void vcap_api_encode_typegroups_test(struct kunit *test)
+{
+ u32 stream[12] = {0};
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 1, },
+ { .offset = 98, .width = 2, .value = 3, },
+ { .offset = 147, .width = 3, .value = 5, },
+ { .offset = 196, .width = 2, .value = 2, },
+ { .offset = 245, .width = 5, .value = 27, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+
+ vcap_encode_typegroups(stream, 49, typegroups, false);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x8, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x1, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x3, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x5, stream[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x2, stream[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[9]);
+ KUNIT_EXPECT_EQ(test, (u32)27, stream[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[11]);
+}
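+
+/* With a 49-bit subword each subword spans two 32-bit words, so the typegroup
+ * values above land in the even-numbered stream words (0, 2, 4, ...) while
+ * the odd-numbered words remain zero.
+ */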
+
+static void vcap_api_encode_bit_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ u32 stream[4] = {0};
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 1, },
+ { .offset = 98, .width = 2, .value = 3, },
+ { .offset = 147, .width = 3, .value = 5, },
+ { .offset = 196, .width = 2, .value = 2, },
+ { .offset = 245, .width = 1, .value = 0, },
+ };
+
+ vcap_iter_init(&iter, 49, typegroups, 44);
+
+ KUNIT_EXPECT_EQ(test, 48, iter.offset);
+ KUNIT_EXPECT_EQ(test, 1, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 16, iter.reg_bitpos);
+
+ vcap_encode_bit(stream, &iter, 1);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)BIT(16), stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]);
+}
+
+static void vcap_api_encode_field_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ u32 stream[16] = {0};
+ struct vcap_typegroup typegroups[] = {
+ { .offset = 0, .width = 4, .value = 8, },
+ { .offset = 49, .width = 1, .value = 1, },
+ { .offset = 98, .width = 2, .value = 3, },
+ { .offset = 147, .width = 3, .value = 5, },
+ { .offset = 196, .width = 2, .value = 2, },
+ { .offset = 245, .width = 5, .value = 27, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 4,
+ };
+ u8 value[] = {0x5};
+
+ vcap_iter_init(&iter, 49, typegroups, rf.offset);
+
+ KUNIT_EXPECT_EQ(test, 91, iter.offset);
+ KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos);
+
+ vcap_encode_field(stream, &iter, rf.width, value);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x5 << 10), stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[4]);
+
+ vcap_encode_typegroups(stream, 49, typegroups, false);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x8, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x1, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x5 << 10), stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x3, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x5, stream[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x2, stream[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[9]);
+ KUNIT_EXPECT_EQ(test, (u32)27, stream[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[11]);
+}
+
+/* In this testcase the subword is smaller than a register */
+static void vcap_api_encode_short_field_test(struct kunit *test)
+{
+ struct vcap_stream_iter iter;
+ int sw_width = 21;
+ u32 stream[6] = {0};
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 3, .value = 7, },
+ { .offset = 21, .width = 2, .value = 3, },
+ { .offset = 42, .width = 1, .value = 1, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 4,
+ };
+ u8 value[] = {0x5};
+
+ vcap_iter_init(&iter, sw_width, tgt, rf.offset);
+
+ KUNIT_EXPECT_EQ(test, 1, iter.regs_per_sw);
+ KUNIT_EXPECT_EQ(test, 21, iter.sw_width);
+ KUNIT_EXPECT_EQ(test, 25 + 3 + 2, iter.offset);
+ KUNIT_EXPECT_EQ(test, 1, iter.reg_idx);
+ KUNIT_EXPECT_EQ(test, 25 + 3 + 2 - sw_width, iter.reg_bitpos);
+
+ vcap_encode_field(stream, &iter, rf.width, value);
+
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x5 << (25 + 3 + 2 - sw_width)), stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, stream[5]);
+
+ vcap_encode_typegroups(stream, sw_width, tgt, false);
+
+ KUNIT_EXPECT_EQ(test, (u32)7, stream[0]);
+ KUNIT_EXPECT_EQ(test, (u32)((0x5 << (25 + 3 + 2 - sw_width)) + 3), stream[1]);
+ KUNIT_EXPECT_EQ(test, (u32)1, stream[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0, stream[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0, stream[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0, stream[5]);
+}
+
+static void vcap_api_encode_keyfield_test(struct kunit *test)
+{
+ u32 keywords[16] = {0};
+ u32 maskwords[16] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keywords,
+ .maskstream = maskwords,
+ .actionstream = keywords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_MAC_ETYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf = {
+ .ctrl.list = {},
+ .ctrl.key = VCAP_KF_ISDX_CLS,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0xeef014a1,
+ .data.u32.mask = 0xfff,
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ };
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 156, .width = 1, .value = 1, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+
+ vcap_test_api_init(&admin);
+ vcap_encode_keyfield(&rule, &ckf, &rf, tgt);
+
+ /* Key */
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x04a1 << 6), keywords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, keywords[6]);
+
+ /* Mask */
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)(0x0fff << 6), maskwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, maskwords[6]);
+}
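+
+/* In the test above the 2-bit typegroup shifts the ISDX_CLS field from offset
+ * 56 to 58 in the 52-bit subword stream, which maps to bit 6 of the third
+ * 32-bit word; only the low 12 bits of the value (0x4a1) and mask (0xfff) are
+ * encoded.
+ */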
+
+static void vcap_api_encode_max_keyfield_test(struct kunit *test)
+{
+ int idx;
+ u32 keywords[6] = {0};
+ u32 maskwords[6] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ /* IS2 sw_width = 52 bits */
+ .cache = {
+ .keystream = keywords,
+ .maskstream = maskwords,
+ .actionstream = keywords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_IP_7TUPLE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf = {
+ .ctrl.list = {},
+ .ctrl.key = VCAP_KF_L3_IP6_DIP,
+ .ctrl.type = VCAP_FIELD_U128,
+ .data.u128.value = { 0xa1, 0xa2, 0xa3, 0xa4, 0, 0, 0x43, 0,
+ 0, 0, 0, 0, 0, 0, 0x78, 0x8e, },
+ .data.u128.mask = { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0,
+ 0, 0, 0, 0, 0, 0, 0xff, 0xff },
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U128,
+ .offset = 0,
+ .width = 128,
+ };
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 156, .width = 1, .value = 1, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+ u32 keyres[] = {
+ 0x928e8a84,
+ 0x000c0002,
+ 0x00000010,
+ 0x00000000,
+ 0x0239e000,
+ 0x00000000,
+ };
+ u32 mskres[] = {
+ 0xfffffffc,
+ 0x000c0003,
+ 0x0000003f,
+ 0x00000000,
+ 0x03fffc00,
+ 0x00000000,
+ };
+
+ vcap_encode_keyfield(&rule, &ckf, &rf, tgt);
+
+ /* Key */
+ for (idx = 0; idx < ARRAY_SIZE(keyres); ++idx)
+ KUNIT_EXPECT_EQ(test, keyres[idx], keywords[idx]);
+ /* Mask */
+ for (idx = 0; idx < ARRAY_SIZE(mskres); ++idx)
+ KUNIT_EXPECT_EQ(test, mskres[idx], maskwords[idx]);
+}
+
+static void vcap_api_encode_actionfield_test(struct kunit *test)
+{
+ u32 actwords[16] = {0};
+ int sw_width = 21;
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_ES2, /* act_width = 21 */
+ .cache = {
+ .actionstream = actwords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .actionset = VCAP_AFS_BASE_TYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_actionfield caf = {
+ .ctrl.list = {},
+ .ctrl.action = VCAP_AF_POLICE_IDX,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x67908032,
+ };
+ struct vcap_field rf = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ };
+ struct vcap_typegroup tgt[] = {
+ { .offset = 0, .width = 2, .value = 2, },
+ { .offset = 21, .width = 1, .value = 1, },
+ { .offset = 42, .width = 1, .value = 0, },
+ { .offset = 0, .width = 0, .value = 0, },
+ };
+
+ vcap_encode_actionfield(&rule, &caf, &rf, tgt);
+
+ /* Action */
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)((0x32 << (35 + 2 + 1 - sw_width)) & 0x1fffff), actwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)((0x32 >> ((2 * sw_width) - 38 - 1))), actwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0, actwords[6]);
+}
+
+static void vcap_api_keyfield_typegroup_test(struct kunit *test)
+{
+ const struct vcap_typegroup *tg;
+
+ tg = vcap_keyfield_typegroup(&test_vctrl, VCAP_TYPE_IS2, VCAP_KFS_MAC_ETYPE);
+ KUNIT_EXPECT_PTR_NE(test, NULL, tg);
+ KUNIT_EXPECT_EQ(test, 0, tg[0].offset);
+ KUNIT_EXPECT_EQ(test, 2, tg[0].width);
+ KUNIT_EXPECT_EQ(test, 2, tg[0].value);
+ KUNIT_EXPECT_EQ(test, 156, tg[1].offset);
+ KUNIT_EXPECT_EQ(test, 1, tg[1].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[1].value);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].offset);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].value);
+
+ tg = vcap_keyfield_typegroup(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_LL_FULL);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, tg);
+}
+
+static void vcap_api_actionfield_typegroup_test(struct kunit *test)
+{
+ const struct vcap_typegroup *tg;
+
+ tg = vcap_actionfield_typegroup(&test_vctrl, VCAP_TYPE_IS0, VCAP_AFS_FULL);
+ KUNIT_EXPECT_PTR_NE(test, NULL, tg);
+ KUNIT_EXPECT_EQ(test, 0, tg[0].offset);
+ KUNIT_EXPECT_EQ(test, 3, tg[0].width);
+ KUNIT_EXPECT_EQ(test, 4, tg[0].value);
+ KUNIT_EXPECT_EQ(test, 110, tg[1].offset);
+ KUNIT_EXPECT_EQ(test, 2, tg[1].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[1].value);
+ KUNIT_EXPECT_EQ(test, 220, tg[2].offset);
+ KUNIT_EXPECT_EQ(test, 2, tg[2].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[2].value);
+ KUNIT_EXPECT_EQ(test, 0, tg[3].offset);
+ KUNIT_EXPECT_EQ(test, 0, tg[3].width);
+ KUNIT_EXPECT_EQ(test, 0, tg[3].value);
+
+ tg = vcap_actionfield_typegroup(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_CLASSIFICATION);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, tg);
+}
+
+static void vcap_api_vcap_keyfields_test(struct kunit *test)
+{
+ const struct vcap_field *ft;
+
+ ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_KFS_MAC_ETYPE);
+ KUNIT_EXPECT_PTR_NE(test, NULL, ft);
+
+ /* Keyset that is not available and within the maximum keyset enum value */
+ ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_PURE_5TUPLE_IP4);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+
+ /* Keyset that is not available and beyond the maximum keyset enum value */
+ ft = vcap_keyfields(&test_vctrl, VCAP_TYPE_ES2, VCAP_KFS_LL_FULL);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+}
+
+static void vcap_api_vcap_actionfields_test(struct kunit *test)
+{
+ const struct vcap_field *ft;
+
+ ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS0, VCAP_AFS_FULL);
+ KUNIT_EXPECT_PTR_NE(test, NULL, ft);
+
+ ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_FULL);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+
+ ft = vcap_actionfields(&test_vctrl, VCAP_TYPE_IS2, VCAP_AFS_CLASSIFICATION);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ft);
+}
+
+static void vcap_api_encode_rule_keyset_test(struct kunit *test)
+{
+ u32 keywords[16] = {0};
+ u32 maskwords[16] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .keystream = keywords,
+ .maskstream = maskwords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_MAC_ETYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x00,
+ .data.u32.mask = 0x0f,
+ },
+ {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ .ctrl.type = VCAP_FIELD_BIT,
+ .data.u1.value = 0x01,
+ .data.u1.mask = 0x01,
+ },
+ {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_L3,
+ .ctrl.type = VCAP_FIELD_BIT,
+ .data.u1.value = 0x00,
+ .data.u1.mask = 0x01,
+ },
+ {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x00,
+ .data.u32.mask = 0x0f,
+ },
+ {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK,
+ .ctrl.type = VCAP_FIELD_U72,
+ .data.u72.value = {0x0, 0x00, 0x00, 0x00},
+ .data.u72.mask = {0xfd, 0xff, 0xff, 0xff},
+ },
+ {
+ .ctrl.key = VCAP_KF_L2_DMAC,
+ .ctrl.type = VCAP_FIELD_U48,
+ /* Opposite endianness */
+ .data.u48.value = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
+ .data.u48.mask = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ },
+ {
+ .ctrl.key = VCAP_KF_ETYPE_LEN_IS,
+ .ctrl.type = VCAP_FIELD_BIT,
+ .data.u1.value = 0x01,
+ .data.u1.mask = 0x01,
+ },
+ {
+ .ctrl.key = VCAP_KF_ETYPE,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0xaabb,
+ .data.u32.mask = 0xffff,
+ },
+ };
+ int idx;
+ int ret;
+
+ /* Empty entry list */
+ INIT_LIST_HEAD(&rule.data.keyfields);
+ ret = vcap_encode_rule_keyset(&rule);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &rule.data.keyfields);
+ ret = vcap_encode_rule_keyset(&rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* The key and mask values below are from an actual Sparx5 rule config */
+ /* Key */
+ KUNIT_EXPECT_EQ(test, (u32)0x00000042, keywords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00020100, keywords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x60504030, keywords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x0002aaee, keywords[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[9]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, keywords[11]);
+
+ /* Mask: the mask words are inverted when applied to the register */
+ KUNIT_EXPECT_EQ(test, (u32)~0x00b07f80, maskwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfff00000, maskwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffffffc, maskwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfff000ff, maskwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)~0x00000000, maskwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffffff0, maskwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffffffe, maskwords[6]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xfffc0001, maskwords[7]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[8]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[9]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[10]);
+ KUNIT_EXPECT_EQ(test, (u32)~0xffffffff, maskwords[11]);
+}
+
+static void vcap_api_encode_rule_actionset_test(struct kunit *test)
+{
+ u32 actwords[16] = {0};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .cache = {
+ .actionstream = actwords,
+ },
+ };
+ struct vcap_rule_internal rule = {
+ .admin = &admin,
+ .data = {
+ .actionset = VCAP_AFS_BASE_TYPE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_actionfield caf[] = {
+ {
+ .ctrl.action = VCAP_AF_MATCH_ID,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x01,
+ },
+ {
+ .ctrl.action = VCAP_AF_MATCH_ID_MASK,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x01,
+ },
+ {
+ .ctrl.action = VCAP_AF_CNT_ID,
+ .ctrl.type = VCAP_FIELD_U32,
+ .data.u32.value = 0x64,
+ },
+ };
+ int idx;
+ int ret;
+
+ /* Empty entry list */
+ INIT_LIST_HEAD(&rule.data.actionfields);
+ ret = vcap_encode_rule_actionset(&rule);
+ /* We allow rules with no actions */
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ for (idx = 0; idx < ARRAY_SIZE(caf); idx++)
+ list_add_tail(&caf[idx].ctrl.list, &rule.data.actionfields);
+ ret = vcap_encode_rule_actionset(&rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* The action values below are from an actual Sparx5 rule config */
+ KUNIT_EXPECT_EQ(test, (u32)0x00000002, actwords[0]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[1]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[2]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[3]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[4]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00100000, actwords[5]);
+ KUNIT_EXPECT_EQ(test, (u32)0x06400010, actwords[6]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[7]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[8]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[9]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[10]);
+ KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
+}
+
+static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .data = {
+ .keyset = VCAP_KFS_NO_VALUE,
+ },
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_rule *rule = (struct vcap_rule *)&ri;
+ struct vcap_client_keyfield *kf;
+ int ret;
+ struct vcap_u128_key dip = {
+ .value = {0x17, 0x26, 0x35, 0x44, 0x63, 0x62, 0x71},
+ .mask = {0xf1, 0xf2, 0xf3, 0xf4, 0x4f, 0x3f, 0x2f, 0x1f},
+ };
+ int idx;
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
+ KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
+ KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_ANY);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_LOOKUP_FIRST_IS, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
+ KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_TYPE, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
+ KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
+
+ INIT_LIST_HEAD(&rule->keyfields);
+ ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ kf = list_first_entry(&rule->keyfields, struct vcap_client_keyfield,
+ ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_KF_L3_IP6_SIP, kf->ctrl.key);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U128, kf->ctrl.type);
+ for (idx = 0; idx < ARRAY_SIZE(dip.value); ++idx)
+ KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
+ for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
+ KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
+}
+
+static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .data = {
+ .actionset = VCAP_AFS_NO_VALUE,
+ },
+ };
+ struct vcap_rule *rule = (struct vcap_rule *)&ri;
+ struct vcap_client_actionfield *af;
+ int ret;
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_0);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
+
+ INIT_LIST_HEAD(&rule->actionfields);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ af = list_first_entry(&rule->actionfields,
+ struct vcap_client_actionfield, ctrl.list);
+ KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
+ KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
+ KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
+}
+
+static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
+{
+ struct vcap_keyset_list matches = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ }, {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ }, {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_L3,
+ }, {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ }, {
+ .ctrl.key = VCAP_KF_IF_IGR_PORT_MASK,
+ }, {
+ .ctrl.key = VCAP_KF_L2_DMAC,
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE_LEN_IS,
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE,
+ },
+ };
+ int idx;
+ bool ret;
+ enum vcap_keyfield_set keysets[10] = {};
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
+
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
+
+ KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, 1, matches.cnt);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, matches.keysets[0]);
+}
+
+static void vcap_api_rule_find_keyset_failed_test(struct kunit *test)
+{
+ struct vcap_keyset_list matches = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ }, {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ }, {
+ .ctrl.key = VCAP_KF_ARP_OPCODE,
+ }, {
+ .ctrl.key = VCAP_KF_L3_IP4_SIP,
+ }, {
+ .ctrl.key = VCAP_KF_L3_IP4_DIP,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_PCP_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE_LEN_IS, /* Not with ARP */
+ }, {
+ .ctrl.key = VCAP_KF_ETYPE, /* Not with ARP */
+ },
+ };
+ int idx;
+ bool ret;
+ enum vcap_keyfield_set keysets[10] = {};
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
+
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
+
+ KUNIT_EXPECT_EQ(test, false, ret);
+ KUNIT_EXPECT_EQ(test, 0, matches.cnt);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_NO_VALUE, matches.keysets[0]);
+}
+
+static void vcap_api_rule_find_keyset_many_test(struct kunit *test)
+{
+ struct vcap_keyset_list matches = {};
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_client_keyfield ckf[] = {
+ {
+ .ctrl.key = VCAP_KF_TYPE,
+ }, {
+ .ctrl.key = VCAP_KF_LOOKUP_FIRST_IS,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_DEI_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_PCP_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_8021Q_VID_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_ISDX_CLS,
+ }, {
+ .ctrl.key = VCAP_KF_L2_MC_IS,
+ }, {
+ .ctrl.key = VCAP_KF_L2_BC_IS,
+ },
+ };
+ int idx;
+ bool ret;
+ enum vcap_keyfield_set keysets[10] = {};
+
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
+ list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
+
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
+
+ KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, 6, matches.cnt);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_ARP, matches.keysets[0]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP4_OTHER, matches.keysets[1]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP4_TCP_UDP, matches.keysets[2]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP6_STD, matches.keysets[3]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_IP_7TUPLE, matches.keysets[4]);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, matches.keysets[5]);
+}
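+
+/* The listed fields are common to many IS2 keysets in the test model, so the
+ * search above returns all six matching keysets rather than a single one.
+ */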
+
+static void vcap_api_encode_rule_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin is2_admin = {
+ .vtype = VCAP_TYPE_IS2,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ struct vcap_rule *rule;
+ struct vcap_rule_internal *ri;
+ int vcap_chain_id = 10005;
+ enum vcap_user user = VCAP_USER_VCAP_UTIL;
+ u16 priority = 10;
+ int id = 100;
+ int ret;
+ struct vcap_u48_key smac = {
+ .value = { 0x88, 0x75, 0x32, 0x34, 0x9e, 0xb1 },
+ .mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+ };
+ struct vcap_u48_key dmac = {
+ .value = { 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 },
+ .mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+ };
+ u32 port_mask_rng_value = 0x05;
+ u32 port_mask_rng_mask = 0x0f;
+ u32 igr_port_mask_value = 0xffabcd01;
+ u32 igr_port_mask_mask = ~0;
+ /* counter is not written yet, so it is not in expwriteaddr */
+ u32 expwriteaddr[] = {792, 793, 794, 795, 796, 797, 0};
+ int idx;
+
+ vcap_test_api_init(&is2_admin);
+
+ /* Allocate the rule */
+ rule = vcap_alloc_rule(&test_vctrl, &test_netdev, vcap_chain_id, user,
+ priority, id);
+ KUNIT_EXPECT_PTR_NE(test, NULL, rule);
+ ri = (struct vcap_rule_internal *)rule;
+
+ /* Add rule keys */
+ ret = vcap_rule_add_key_u48(rule, VCAP_KF_L2_DMAC, &dmac);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_u48(rule, VCAP_KF_L2_SMAC, &smac);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_ETYPE_LEN_IS, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /* Cannot add the same field twice */
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_ETYPE_LEN_IS, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+ ret = vcap_rule_add_key_bit(rule, VCAP_KF_IF_IGR_PORT_MASK_L3,
+ VCAP_BIT_ANY);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ port_mask_rng_value, port_mask_rng_mask);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ igr_port_mask_value, igr_port_mask_mask);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Add rule actions */
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_CNT_ID, id);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_MATCH_ID, 1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_MATCH_ID_MASK, 1);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* For now the actionset is hardcoded */
+ ret = vcap_set_rule_set_actionset(rule, VCAP_AFS_BASE_TYPE);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Validation with validate keyset callback */
+ ret = vcap_val_rule(rule, ETH_P_ALL);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, rule->keyset);
+ KUNIT_EXPECT_EQ(test, VCAP_AFS_BASE_TYPE, rule->actionset);
+ KUNIT_EXPECT_EQ(test, 6, ri->size);
+ KUNIT_EXPECT_EQ(test, 2, ri->keyset_sw_regs);
+ KUNIT_EXPECT_EQ(test, 4, ri->actionset_sw_regs);
+
+ /* Add rule with write callback */
+ ret = vcap_add_rule(rule);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 792, is2_admin.last_used_addr);
+ for (idx = 0; idx < ARRAY_SIZE(expwriteaddr); ++idx)
+ KUNIT_EXPECT_EQ(test, expwriteaddr[idx], test_updateaddr[idx]);
+
+ /* Check that the rule has been added */
+ ret = list_empty(&is2_admin.rules);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ vcap_free_rule(rule);
+
+ /* Check that the rule has been freed: tricky to access since this
+ * memory should not be accessible anymore
+ */
+ KUNIT_EXPECT_PTR_NE(test, NULL, rule);
+ ret = list_empty(&rule->keyfields);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = list_empty(&rule->actionfields);
+ KUNIT_EXPECT_EQ(test, true, ret);
+}
+
+static void vcap_api_set_rule_counter_test(struct kunit *test)
+{
+ struct vcap_admin is2_admin = {
+ .cache = {
+ .counter = 100,
+ .sticky = true,
+ },
+ };
+ struct vcap_rule_internal ri = {
+ .data = {
+ .id = 1001,
+ },
+ .addr = 600,
+ .admin = &is2_admin,
+ .counter_id = 1002,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_rule_internal ri2 = {
+ .data = {
+ .id = 2001,
+ },
+ .addr = 700,
+ .admin = &is2_admin,
+ .counter_id = 2002,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_counter ctr = { .value = 0, .sticky = false};
+ struct vcap_counter ctr2 = { .value = 101, .sticky = true};
+ int ret;
+
+ vcap_test_api_init(&is2_admin);
+ list_add_tail(&ri.list, &is2_admin.rules);
+ list_add_tail(&ri2.list, &is2_admin.rules);
+
+ pr_info("%s:%d\n", __func__, __LINE__);
+ ret = vcap_rule_set_counter(&ri.data, &ctr);
+ pr_info("%s:%d\n", __func__, __LINE__);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 1002, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 0, test_hw_cache.counter);
+ KUNIT_EXPECT_EQ(test, false, test_hw_cache.sticky);
+ KUNIT_EXPECT_EQ(test, 600, test_updateaddr[0]);
+
+ ret = vcap_rule_set_counter(&ri2.data, &ctr2);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 2002, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 101, test_hw_cache.counter);
+ KUNIT_EXPECT_EQ(test, true, test_hw_cache.sticky);
+ KUNIT_EXPECT_EQ(test, 700, test_updateaddr[1]);
+}
+
+static void vcap_api_get_rule_counter_test(struct kunit *test)
+{
+ struct vcap_admin is2_admin = {
+ .cache = {
+ .counter = 100,
+ .sticky = true,
+ },
+ };
+ struct vcap_rule_internal ri = {
+ .data = {
+ .id = 1010,
+ },
+ .addr = 400,
+ .admin = &is2_admin,
+ .counter_id = 1011,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_rule_internal ri2 = {
+ .data = {
+ .id = 2011,
+ },
+ .addr = 300,
+ .admin = &is2_admin,
+ .counter_id = 2012,
+ .vctrl = &test_vctrl,
+ };
+ struct vcap_counter ctr = {};
+ struct vcap_counter ctr2 = {};
+ int ret;
+
+ vcap_test_api_init(&is2_admin);
+ test_hw_cache.counter = 55;
+ test_hw_cache.sticky = true;
+
+ list_add_tail(&ri.list, &is2_admin.rules);
+ list_add_tail(&ri2.list, &is2_admin.rules);
+
+ ret = vcap_rule_get_counter(&ri.data, &ctr);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 1011, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 55, ctr.value);
+ KUNIT_EXPECT_EQ(test, true, ctr.sticky);
+ KUNIT_EXPECT_EQ(test, 400, test_updateaddr[0]);
+
+ test_hw_cache.counter = 22;
+ test_hw_cache.sticky = false;
+
+ ret = vcap_rule_get_counter(&ri2.data, &ctr2);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ KUNIT_EXPECT_EQ(test, 2012, test_hw_counter_id);
+ KUNIT_EXPECT_EQ(test, 22, ctr2.value);
+ KUNIT_EXPECT_EQ(test, false, ctr2.sticky);
+ KUNIT_EXPECT_EQ(test, 300, test_updateaddr[1]);
+}
+
+static void vcap_api_rule_insert_in_order_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+
+ vcap_test_api_init(&admin);
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+}
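+
+/* The expected addresses show the allocation policy: starting from
+ * last_used_addr (800), each rule gets the highest address that is aligned
+ * to its own size and whose address range still fits below the rules already
+ * present, giving 780 (X12), 774 (X6), 771 (X3) and 768 (X2).
+ */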
+
+static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ struct vcap_rule_internal *elem;
+ u32 exp_addr[] = {780, 774, 771, 768, 767};
+ int idx;
+
+ vcap_test_api_init(&admin);
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 200, 2, 798);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 795);
+ KUNIT_EXPECT_EQ(test, 6, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 3, test_move_count);
+ KUNIT_EXPECT_EQ(test, 798, test_move_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 400, 6, 792);
+ KUNIT_EXPECT_EQ(test, 6, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 6, test_move_count);
+ KUNIT_EXPECT_EQ(test, 792, test_move_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 50, 500, 12, 780);
+ KUNIT_EXPECT_EQ(test, 18, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 12, test_move_count);
+ KUNIT_EXPECT_EQ(test, 786, test_move_addr);
+
+ idx = 0;
+ list_for_each_entry(elem, &admin.rules, list) {
+ KUNIT_EXPECT_EQ(test, exp_addr[idx], elem->addr);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr);
+}
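+
+/* In this test each insertion after the first triggers a move of the already
+ * installed rules to make room; test_cache_move() records the address, offset
+ * and count of that move so the expectations above can verify it.
+ */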
+
+static void vcap_api_rule_remove_at_end_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .last_valid_addr = 3071,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ int ret;
+
+ vcap_test_api_init(&admin);
+ test_init_rule_deletion();
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ /* Remove rules again from the end */
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 768, test_init_start);
+ KUNIT_EXPECT_EQ(test, 2, test_init_count);
+ KUNIT_EXPECT_EQ(test, 771, admin.last_used_addr);
+
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 771, test_init_start);
+ KUNIT_EXPECT_EQ(test, 3, test_init_count);
+ KUNIT_EXPECT_EQ(test, 774, admin.last_used_addr);
+
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 774, test_init_start);
+ KUNIT_EXPECT_EQ(test, 6, test_init_count);
+ KUNIT_EXPECT_EQ(test, 780, admin.last_used_addr);
+
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 780, test_init_start);
+ KUNIT_EXPECT_EQ(test, 12, test_init_count);
+ KUNIT_EXPECT_EQ(test, 3072, admin.last_used_addr);
+}
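+
+/* Deleting the rule at the lowest in-use address requires no move of other
+ * rules; the freed address range is simply re-initialized through the init
+ * callback, which the test observes via test_init_start/test_init_count.
+ */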
+
+static void vcap_api_rule_remove_in_middle_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .last_valid_addr = 800 - 1,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ int ret;
+
+ vcap_test_api_init(&admin);
+
+ /* Create rules with different sizes and check that they are placed
+ * at the correct address in the VCAP according to size
+ */
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ /* Remove rules in the middle */
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 768, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -6, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 6, test_move_count);
+ KUNIT_EXPECT_EQ(test, 768, test_init_start);
+ KUNIT_EXPECT_EQ(test, 6, test_init_count);
+ KUNIT_EXPECT_EQ(test, 774, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 774, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -4, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 2, test_move_count);
+ KUNIT_EXPECT_EQ(test, 774, test_init_start);
+ KUNIT_EXPECT_EQ(test, 4, test_init_count);
+ KUNIT_EXPECT_EQ(test, 778, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 778, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -20, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 2, test_move_count);
+ KUNIT_EXPECT_EQ(test, 778, test_init_start);
+ KUNIT_EXPECT_EQ(test, 20, test_init_count);
+ KUNIT_EXPECT_EQ(test, 798, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 798, test_init_start);
+ KUNIT_EXPECT_EQ(test, 2, test_init_count);
+ KUNIT_EXPECT_EQ(test, 800, admin.last_used_addr);
+}
+
+static void vcap_api_rule_remove_in_front_test(struct kunit *test)
+{
+ /* Data used by VCAP Library callback */
+ static u32 keydata[32] = {};
+ static u32 mskdata[32] = {};
+ static u32 actdata[32] = {};
+
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ .first_cid = 10000,
+ .last_cid = 19999,
+ .lookups = 4,
+ .first_valid_addr = 0,
+ .last_used_addr = 800,
+ .last_valid_addr = 800 - 1,
+ .cache = {
+ .keystream = keydata,
+ .maskstream = mskdata,
+ .actionstream = actdata,
+ },
+ };
+ int ret;
+
+ vcap_test_api_init(&admin);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 10, 500, 12, 780);
+ KUNIT_EXPECT_EQ(test, 780, admin.last_used_addr);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, test_move_addr);
+ KUNIT_EXPECT_EQ(test, 0, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 0, test_move_count);
+ KUNIT_EXPECT_EQ(test, 780, test_init_start);
+ KUNIT_EXPECT_EQ(test, 12, test_init_count);
+ KUNIT_EXPECT_EQ(test, 800, admin.last_used_addr);
+
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 792);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 789);
+ test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 786);
+
+ test_init_rule_deletion();
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 786, test_move_addr);
+ KUNIT_EXPECT_EQ(test, -8, test_move_offset);
+ KUNIT_EXPECT_EQ(test, 6, test_move_count);
+ KUNIT_EXPECT_EQ(test, 786, test_init_start);
+ KUNIT_EXPECT_EQ(test, 8, test_init_count);
+ KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr);
+}
+
+static struct kunit_case vcap_api_rule_remove_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_remove_at_end_test),
+ KUNIT_CASE(vcap_api_rule_remove_in_middle_test),
+ KUNIT_CASE(vcap_api_rule_remove_in_front_test),
+ {}
+};
+
+static void vcap_api_next_lookup_basic_test(struct kunit *test)
+{
+ struct vcap_admin admin1 = {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 0,
+ .first_cid = 8000000,
+ .last_cid = 8199999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_admin admin2 = {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 1,
+ .first_cid = 8200000,
+ .last_cid = 8399999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ };
+ bool ret;
+
+ vcap_test_api_init(&admin1);
+ list_add_tail(&admin2.list, &test_vctrl.list);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 1001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8101000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8100000, 8101000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8100000, 8201000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8200000, 8201000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8200000, 8301000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+}
+
+static void vcap_api_next_lookup_advanced_test(struct kunit *test)
+{
+ struct vcap_admin admin1 = {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 0,
+ .first_cid = 1000000,
+ .last_cid = 1199999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_admin admin2 = {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 1,
+ .first_cid = 1200000,
+ .last_cid = 1399999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_admin admin3 = {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 2,
+ .first_cid = 1400000,
+ .last_cid = 1599999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_admin admin4 = {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 0,
+ .first_cid = 8000000,
+ .last_cid = 8199999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_admin admin5 = {
+ .vtype = VCAP_TYPE_IS2,
+ .vinst = 1,
+ .first_cid = 8200000,
+ .last_cid = 8399999,
+ .lookups = 4,
+ .lookups_per_instance = 2,
+ };
+ bool ret;
+
+ vcap_test_api_init(&admin1);
+ list_add_tail(&admin2.list, &test_vctrl.list);
+ list_add_tail(&admin3.list, &test_vctrl.list);
+ list_add_tail(&admin4.list, &test_vctrl.list);
+ list_add_tail(&admin5.list, &test_vctrl.list);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1101000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1201000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1301000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1100000, 8101000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1300000, 1401000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1400000, 1501000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 1500000, 8001000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8001000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8000000, 8101000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+ ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+}
+
+static void vcap_api_filter_unsupported_keys_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ .data.keyset = VCAP_KFS_MAC_ETYPE,
+ };
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_ARP_ADDR_SPACE_OK_IS, /* arp keys are not in keyset */
+ VCAP_KF_ARP_PROTO_SPACE_OK_IS,
+ VCAP_KF_ARP_LEN_OK_IS,
+ VCAP_KF_ARP_TGT_MATCH_IS,
+ VCAP_KF_ARP_SENDER_MATCH_IS,
+ VCAP_KF_ARP_OPCODE_UNKNOWN_IS,
+ VCAP_KF_ARP_OPCODE,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ };
+ enum vcap_key_field expected[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ };
+ struct vcap_client_keyfield *ckf, *next;
+ bool ret;
+ int idx;
+
+ /* Add all keys to the rule */
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(keylist); idx++) {
+ ckf = kzalloc(sizeof(*ckf), GFP_KERNEL);
+ if (ckf) {
+ ckf->ctrl.key = keylist[idx];
+ list_add_tail(&ckf->ctrl.list, &ri.data.keyfields);
+ }
+ }
+
+ KUNIT_EXPECT_EQ(test, 14, ARRAY_SIZE(keylist));
+
+ /* Drop unsupported keys from the rule */
+ ret = vcap_filter_rule_keys(&ri.data, NULL, 0, true);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Check remaining keys in the rule */
+ idx = 0;
+ list_for_each_entry_safe(ckf, next, &ri.data.keyfields, ctrl.list) {
+ KUNIT_EXPECT_EQ(test, expected[idx], ckf->ctrl.key);
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 7, idx);
+}
+
+static void vcap_api_filter_keylist_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ .data.keyset = VCAP_KFS_NORMAL_7TUPLE,
+ };
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_ETYPE,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TCP_IS,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_RNG,
+ };
+ enum vcap_key_field droplist[] = {
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_RNG,
+ };
+ enum vcap_key_field expected[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_ETYPE,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TCP_IS,
+ };
+ struct vcap_client_keyfield *ckf, *next;
+ bool ret;
+ int idx;
+
+ /* Add all keys to the rule */
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(keylist); idx++) {
+ ckf = kzalloc(sizeof(*ckf), GFP_KERNEL);
+ if (ckf) {
+ ckf->ctrl.key = keylist[idx];
+ list_add_tail(&ckf->ctrl.list, &ri.data.keyfields);
+ }
+ }
+
+ KUNIT_EXPECT_EQ(test, 38, ARRAY_SIZE(keylist));
+
+ /* Drop listed keys from the rule */
+ ret = vcap_filter_rule_keys(&ri.data, droplist, ARRAY_SIZE(droplist),
+ false);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Check remaining keys in the rule */
+ idx = 0;
+ list_for_each_entry_safe(ckf, next, &ri.data.keyfields, ctrl.list) {
+ KUNIT_EXPECT_EQ(test, expected[idx], ckf->ctrl.key);
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 26, idx);
+}
+
+static struct kunit_suite vcap_api_rule_remove_test_suite = {
+ .name = "VCAP_API_Rule_Remove_Testsuite",
+ .test_cases = vcap_api_rule_remove_test_cases,
+};
+
+static struct kunit_case vcap_api_rule_insert_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_insert_in_order_test),
+ KUNIT_CASE(vcap_api_rule_insert_reverse_order_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_insert_test_suite = {
+ .name = "VCAP_API_Rule_Insert_Testsuite",
+ .test_cases = vcap_api_rule_insert_test_cases,
+};
+
+static struct kunit_case vcap_api_rule_counter_test_cases[] = {
+ KUNIT_CASE(vcap_api_set_rule_counter_test),
+ KUNIT_CASE(vcap_api_get_rule_counter_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_counter_test_suite = {
+ .name = "VCAP_API_Rule_Counter_Testsuite",
+ .test_cases = vcap_api_rule_counter_test_cases,
+};
+
+static struct kunit_case vcap_api_support_test_cases[] = {
+ KUNIT_CASE(vcap_api_next_lookup_basic_test),
+ KUNIT_CASE(vcap_api_next_lookup_advanced_test),
+ KUNIT_CASE(vcap_api_filter_unsupported_keys_test),
+ KUNIT_CASE(vcap_api_filter_keylist_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_support_test_suite = {
+ .name = "VCAP_API_Support_Testsuite",
+ .test_cases = vcap_api_support_test_cases,
+};
+
+static struct kunit_case vcap_api_full_rule_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_find_keyset_basic_test),
+ KUNIT_CASE(vcap_api_rule_find_keyset_failed_test),
+ KUNIT_CASE(vcap_api_rule_find_keyset_many_test),
+ KUNIT_CASE(vcap_api_encode_rule_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_full_rule_test_suite = {
+ .name = "VCAP_API_Full_Rule_Testsuite",
+ .test_cases = vcap_api_full_rule_test_cases,
+};
+
+static struct kunit_case vcap_api_rule_value_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_add_keyvalue_test),
+ KUNIT_CASE(vcap_api_rule_add_actionvalue_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_value_test_suite = {
+ .name = "VCAP_API_Rule_Value_Testsuite",
+ .test_cases = vcap_api_rule_value_test_cases,
+};
+
+static struct kunit_case vcap_api_encoding_test_cases[] = {
+ KUNIT_CASE(vcap_api_set_bit_1_test),
+ KUNIT_CASE(vcap_api_set_bit_0_test),
+ KUNIT_CASE(vcap_api_iterator_init_test),
+ KUNIT_CASE(vcap_api_iterator_next_test),
+ KUNIT_CASE(vcap_api_encode_typegroups_test),
+ KUNIT_CASE(vcap_api_encode_bit_test),
+ KUNIT_CASE(vcap_api_encode_field_test),
+ KUNIT_CASE(vcap_api_encode_short_field_test),
+ KUNIT_CASE(vcap_api_encode_keyfield_test),
+ KUNIT_CASE(vcap_api_encode_max_keyfield_test),
+ KUNIT_CASE(vcap_api_encode_actionfield_test),
+ KUNIT_CASE(vcap_api_keyfield_typegroup_test),
+ KUNIT_CASE(vcap_api_actionfield_typegroup_test),
+ KUNIT_CASE(vcap_api_vcap_keyfields_test),
+ KUNIT_CASE(vcap_api_vcap_actionfields_test),
+ KUNIT_CASE(vcap_api_encode_rule_keyset_test),
+ KUNIT_CASE(vcap_api_encode_rule_actionset_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_encoding_test_suite = {
+ .name = "VCAP_API_Encoding_Testsuite",
+ .test_cases = vcap_api_encoding_test_cases,
+};
+
+kunit_test_suite(vcap_api_rule_remove_test_suite);
+kunit_test_suite(vcap_api_rule_insert_test_suite);
+kunit_test_suite(vcap_api_rule_counter_test_suite);
+kunit_test_suite(vcap_api_support_test_suite);
+kunit_test_suite(vcap_api_full_rule_test_suite);
+kunit_test_suite(vcap_api_rule_value_test_suite);
+kunit_test_suite(vcap_api_encoding_test_suite);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
new file mode 100644
index 000000000000..4fd21da97679
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+#ifndef __VCAP_API_PRIVATE__
+#define __VCAP_API_PRIVATE__
+
+#include <linux/types.h>
+
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#define to_intrule(rule) container_of((rule), struct vcap_rule_internal, data)
+
+/* Private VCAP API rule data */
+struct vcap_rule_internal {
+ struct vcap_rule data; /* provided by the client */
+ struct list_head list; /* the vcap admin list of rules */
+ struct vcap_admin *admin; /* vcap hw instance */
+ struct net_device *ndev; /* the interface that the rule applies to */
+ struct vcap_control *vctrl; /* the client control */
+ u32 sort_key; /* defines the position in the VCAP */
+ int keyset_sw; /* subwords in a keyset */
+ int actionset_sw; /* subwords in an actionset */
+ int keyset_sw_regs; /* registers in a subword in a keyset */
+ int actionset_sw_regs; /* registers in a subword in an actionset */
+ int size; /* the size of the rule: max(entry, action) */
+ u32 addr; /* address in the VCAP at insertion */
+ u32 counter_id; /* counter id (if a dedicated counter is available) */
+ struct vcap_counter counter; /* last read counter value */
+};
+
+/* Bit iterator for the VCAP cache streams */
+struct vcap_stream_iter {
+ u32 offset; /* bit offset from the stream start */
+ u32 sw_width; /* subword width in bits */
+ u32 regs_per_sw; /* registers per subword */
+ u32 reg_idx; /* current register index */
+ u32 reg_bitpos; /* bit offset in current register */
+ const struct vcap_typegroup *tg; /* current typegroup */
+};
+
+/* Check that the control has a valid set of callbacks */
+int vcap_api_check(struct vcap_control *ctrl);
+/* Make a shallow copy of the rule without the fields */
+struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri);
+/* Erase the VCAP cache area used for encoding and decoding */
+void vcap_erase_cache(struct vcap_rule_internal *ri);
+
+/* Iterator functionality */
+
+void vcap_iter_init(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset);
+void vcap_iter_next(struct vcap_stream_iter *itr);
+void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width,
+ const struct vcap_typegroup *tg, u32 offset);
+void vcap_iter_update(struct vcap_stream_iter *itr);
+
+/* Keyset and keyfield functionality */
+
+/* Return the number of keyfields in the keyset */
+int vcap_keyfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset);
+/* Return the typegroup table for the matching keyset (using subword size) */
+const struct vcap_typegroup *
+vcap_keyfield_typegroup(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_keyfield_set keyset);
+/* Return the list of keyfields for the keyset */
+const struct vcap_field *vcap_keyfields(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset);
+
+/* Actionset and actionfield functionality */
+
+/* Return the actionset information for the actionset */
+const struct vcap_set *
+vcap_actionfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset);
+/* Return the number of actionfields in the actionset */
+int vcap_actionfield_count(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_actionfield_set actionset);
+/* Return the typegroup table for the matching actionset (using subword size) */
+const struct vcap_typegroup *
+vcap_actionfield_typegroup(struct vcap_control *vctrl, enum vcap_type vt,
+ enum vcap_actionfield_set actionset);
+/* Return the list of actionfields for the actionset */
+const struct vcap_field *
+vcap_actionfields(struct vcap_control *vctrl,
+ enum vcap_type vt, enum vcap_actionfield_set actionset);
+/* Map actionset id to a string with the actionset name */
+const char *vcap_actionset_name(struct vcap_control *vctrl,
+ enum vcap_actionfield_set actionset);
+/* Map action field id to a string with the action field name */
+const char *vcap_actionfield_name(struct vcap_control *vctrl,
+ enum vcap_action_field action);
+
+/* Read key data from a VCAP address and discover if there are any rule keysets
+ * here
+ */
+int vcap_addr_keysets(struct vcap_control *vctrl, struct net_device *ndev,
+ struct vcap_admin *admin, int addr,
+ struct vcap_keyset_list *kslist);
+
+/* Verify that the typegroup information, subword count, keyset and type id
+ * are in sync and correct, return the list of matching keysets
+ */
+int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt,
+ u32 *keystream, u32 *mskstream, bool mask,
+ int sw_max, struct vcap_keyset_list *kslist);
+
+#endif /* __VCAP_API_PRIVATE__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
new file mode 100644
index 000000000000..5d681d2697cd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
@@ -0,0 +1,5570 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API Test VCAP Model Data
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "vcap_api.h"
+#include "vcap_model_kunit.h"
+
+/* keyfields */
+static const struct vcap_field is0_mll_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_MPLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 2,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_tri_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 7,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 53,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 65,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 71,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 72,
+ .width = 12,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 8,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_MEL_FLAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 7,
+ },
+};
+
+static const struct vcap_field is0_ll_full_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 38,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 51,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 54,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 57,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 58,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 70,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 118,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 183,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 184,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 185,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 187,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 188,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 189,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 195,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 227,
+ .width = 32,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 260,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 261,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 277,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 19,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 114,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 134,
+ .width = 12,
+ },
+ [VCAP_KF_DST_ENTRY] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 147,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 196,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 197,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 213,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 214,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 215,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 225,
+ .width = 32,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 257,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 258,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 259,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 275,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 110,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 113,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 114,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 129,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 132,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 133,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 145,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 193,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 242,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 243,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 260,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 261,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 264,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 265,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 271,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 399,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 528,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 529,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 545,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 19,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 114,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 134,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 148,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 158,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 190,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 222,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 232,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 240,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is0_pure_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 59,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is0_etag_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 7,
+ },
+ [VCAP_KF_8021BR_E_TAGGED] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_8021BR_GRP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 2,
+ },
+ [VCAP_KF_8021BR_ECID_EXT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 8,
+ },
+ [VCAP_KF_8021BR_ECID_BASE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 12,
+ },
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 8,
+ },
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_SMAC_SIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DMAC_DIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 90,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 138,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 186,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 187,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 203,
+ .width = 64,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 267,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 283,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 284,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 86,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 136,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 137,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 138,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 139,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 140,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 142,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 206,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 207,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_SMAC_SIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DMAC_DIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 202,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 221,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 222,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 226,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_SMAC_SIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DMAC_DIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 169,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 193,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_L3_SMAC_SIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DMAC_DIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 91,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 220,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 228,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 244,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 86,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 112,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 116,
+ .width = 1,
+ },
+ [VCAP_KF_L3_SMAC_SIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 117,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DMAC_DIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 118,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 119,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 121,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 169,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 227,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 355,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 483,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 484,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 485,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 486,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 502,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 518,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 534,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 535,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 536,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 537,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 538,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 539,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 540,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 541,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 542,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip6_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_L3_SMAC_SIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DMAC_DIP_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 40,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 42,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 43,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 171,
+ .width = 128,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 99,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 147,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 196,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 212,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 276,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 277,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 2,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 98,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 177,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 210,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 232,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 233,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 234,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 185,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 2,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 193,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 202,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 330,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 460,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 477,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 493,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 516,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 517,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_vid_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 42,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 43,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 48,
+ .width = 1,
+ },
+ [VCAP_KF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 2,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 51,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 52,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 32,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 116,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field es2_ip6_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_ACL_GRP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 8,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 42,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 43,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 171,
+ .width = 128,
+ },
+};
+
+/* keyfield_set */
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_MLL] = {
+ .type_id = 0,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_TRI_VID] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_KFS_LL_FULL] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_NORMAL] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_PURE_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_ETAG] = {
+ .type_id = 3,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_VID] = {
+ .type_id = 9,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP4_VID] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_IP6_VID] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_MLL] = is0_mll_keyfield,
+ [VCAP_KFS_TRI_VID] = is0_tri_vid_keyfield,
+ [VCAP_KFS_LL_FULL] = is0_ll_full_keyfield,
+ [VCAP_KFS_NORMAL] = is0_normal_keyfield,
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+ [VCAP_KFS_PURE_5TUPLE_IP4] = is0_pure_5tuple_ip4_keyfield,
+ [VCAP_KFS_ETAG] = is0_etag_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_VID] = is2_ip6_vid_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP4_VID] = es2_ip4_vid_keyfield,
+ [VCAP_KFS_IP6_VID] = es2_ip6_vid_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_MLL] = ARRAY_SIZE(is0_mll_keyfield),
+ [VCAP_KFS_TRI_VID] = ARRAY_SIZE(is0_tri_vid_keyfield),
+ [VCAP_KFS_LL_FULL] = ARRAY_SIZE(is0_ll_full_keyfield),
+ [VCAP_KFS_NORMAL] = ARRAY_SIZE(is0_normal_keyfield),
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+ [VCAP_KFS_PURE_5TUPLE_IP4] = ARRAY_SIZE(is0_pure_5tuple_ip4_keyfield),
+ [VCAP_KFS_ETAG] = ARRAY_SIZE(is0_etag_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_VID] = ARRAY_SIZE(is2_ip6_vid_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP4_VID] = ARRAY_SIZE(es2_ip4_vid_keyfield),
+ [VCAP_KFS_IP6_VID] = ARRAY_SIZE(es2_ip6_vid_keyfield),
+};
+
+/* actionfields */
+static const struct vcap_field is0_mlbs_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 3,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 45,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 12,
+ },
+ [VCAP_AF_FWD_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 58,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 59,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_Q] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 60,
+ .width = 3,
+ },
+ [VCAP_AF_OAM_Y1731_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 63,
+ .width = 3,
+ },
+ [VCAP_AF_OAM_TWAMP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 66,
+ .width = 1,
+ },
+ [VCAP_AF_OAM_IP_BFD_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_TC_LABEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 3,
+ },
+ [VCAP_AF_TTL_LABEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 71,
+ .width = 3,
+ },
+ [VCAP_AF_NUM_VLD_LABELS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 74,
+ .width = 2,
+ },
+ [VCAP_AF_FWD_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 76,
+ .width = 3,
+ },
+ [VCAP_AF_MPLS_OAM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 79,
+ .width = 3,
+ },
+ [VCAP_AF_MPLS_MEP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_MIP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_OAM_FLAVOR] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_IP_CTRL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 8,
+ },
+ [VCAP_AF_S2_KEY_SEL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_AF_S2_KEY_SEL_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 103,
+ .width = 6,
+ },
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 111,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_KEY_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 117,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_NORM_W16_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 122,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 129,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_NORMALIZE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 131,
+ .width = 1,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 132,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 135,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_mlbs_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 3,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 15,
+ .width = 12,
+ },
+ [VCAP_AF_FWD_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 27,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_Q] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 3,
+ },
+ [VCAP_AF_TC_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_AF_TTL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 3,
+ },
+ [VCAP_AF_MPLS_OAM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 3,
+ },
+ [VCAP_AF_MPLS_MEP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 40,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_MIP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_OAM_FLAVOR] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 42,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_IP_CTRL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT_REDUCED] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_KEY_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_NORM_W32_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 55,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_NORMALIZE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 59,
+ .width = 1,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 60,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 63,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_COSID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 9,
+ .width = 3,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 13,
+ },
+ [VCAP_AF_VLAN_POP_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 58,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 59,
+ .width = 2,
+ },
+ [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 61,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_PUSH_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 2,
+ },
+ [VCAP_AF_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 64,
+ .width = 2,
+ },
+ [VCAP_AF_VLAN_WAS_TAGGED] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 2,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 68,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 69,
+ .width = 12,
+ },
+ [VCAP_AF_RT_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 2,
+ },
+ [VCAP_AF_LPM_AFFIX_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_AF_LPM_AFFIX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 10,
+ },
+ [VCAP_AF_RLEG_DMAC_CHK_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_AF_TTL_DECR_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_AF_L3_MAC_UPDATE_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 97,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 98,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_Q] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 3,
+ },
+ [VCAP_AF_MIP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 102,
+ .width = 2,
+ },
+ [VCAP_AF_OAM_Y1731_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 3,
+ },
+ [VCAP_AF_OAM_TWAMP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 107,
+ .width = 1,
+ },
+ [VCAP_AF_OAM_IP_BFD_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 108,
+ .width = 1,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 117,
+ .width = 8,
+ },
+ [VCAP_AF_S2_KEY_SEL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 125,
+ .width = 1,
+ },
+ [VCAP_AF_S2_KEY_SEL_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 6,
+ },
+ [VCAP_AF_INJ_MASQ_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 132,
+ .width = 1,
+ },
+ [VCAP_AF_INJ_MASQ_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 133,
+ .width = 7,
+ },
+ [VCAP_AF_LPORT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 140,
+ .width = 1,
+ },
+ [VCAP_AF_INJ_MASQ_LPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 7,
+ },
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 148,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_KEY_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 156,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_NORM_W16_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 161,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 166,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 168,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_NORMALIZE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 170,
+ .width = 1,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_COSID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 3,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 13,
+ },
+ [VCAP_AF_VLAN_POP_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 57,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 58,
+ .width = 2,
+ },
+ [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 60,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_PUSH_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 61,
+ .width = 2,
+ },
+ [VCAP_AF_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 63,
+ .width = 2,
+ },
+ [VCAP_AF_VLAN_WAS_TAGGED] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 65,
+ .width = 2,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 12,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 80,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 83,
+ .width = 65,
+ },
+ [VCAP_AF_RT_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 148,
+ .width = 2,
+ },
+ [VCAP_AF_LPM_AFFIX_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_AF_LPM_AFFIX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 10,
+ },
+ [VCAP_AF_RLEG_DMAC_CHK_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 161,
+ .width = 1,
+ },
+ [VCAP_AF_TTL_DECR_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 162,
+ .width = 1,
+ },
+ [VCAP_AF_L3_MAC_UPDATE_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 163,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 164,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_Q] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 165,
+ .width = 3,
+ },
+ [VCAP_AF_MIP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 168,
+ .width = 2,
+ },
+ [VCAP_AF_OAM_Y1731_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 3,
+ },
+ [VCAP_AF_OAM_TWAMP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 173,
+ .width = 1,
+ },
+ [VCAP_AF_OAM_IP_BFD_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 174,
+ .width = 1,
+ },
+ [VCAP_AF_RSVD_LBL_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 4,
+ },
+ [VCAP_AF_TC_LABEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 179,
+ .width = 3,
+ },
+ [VCAP_AF_TTL_LABEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 182,
+ .width = 3,
+ },
+ [VCAP_AF_NUM_VLD_LABELS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 185,
+ .width = 2,
+ },
+ [VCAP_AF_FWD_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 187,
+ .width = 3,
+ },
+ [VCAP_AF_MPLS_OAM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 190,
+ .width = 3,
+ },
+ [VCAP_AF_MPLS_MEP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 193,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_MIP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 194,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_OAM_FLAVOR] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_AF_MPLS_IP_CTRL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 196,
+ .width = 1,
+ },
+ [VCAP_AF_CUSTOM_ACE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 197,
+ .width = 5,
+ },
+ [VCAP_AF_CUSTOM_ACE_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 202,
+ .width = 2,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 204,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 212,
+ .width = 8,
+ },
+ [VCAP_AF_S2_KEY_SEL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_AF_S2_KEY_SEL_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 221,
+ .width = 6,
+ },
+ [VCAP_AF_INJ_MASQ_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_AF_INJ_MASQ_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 228,
+ .width = 7,
+ },
+ [VCAP_AF_LPORT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 235,
+ .width = 1,
+ },
+ [VCAP_AF_INJ_MASQ_LPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 236,
+ .width = 7,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 243,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 259,
+ .width = 16,
+ },
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 275,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 277,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 278,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_KEY_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 283,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_NORM_W16_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 288,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 293,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 295,
+ .width = 2,
+ },
+ [VCAP_AF_NXT_NORMALIZE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 297,
+ .width = 1,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 298,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 301,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_COSID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 3,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_VLAN_POP_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 2,
+ },
+ [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_PUSH_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 2,
+ },
+ [VCAP_AF_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 2,
+ },
+ [VCAP_AF_VLAN_WAS_TAGGED] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 2,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 12,
+ },
+ [VCAP_AF_FWD_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 59,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 60,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_Q] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 61,
+ .width = 3,
+ },
+ [VCAP_AF_MIP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 64,
+ .width = 2,
+ },
+ [VCAP_AF_OAM_Y1731_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 3,
+ },
+ [VCAP_AF_LPORT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 69,
+ .width = 1,
+ },
+ [VCAP_AF_INJ_MASQ_LPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 70,
+ .width = 7,
+ },
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 79,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 80,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_KEY_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 5,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_IS_INNER_ACL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 5,
+ },
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_AF_CPU_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_AF_RT_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 6,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_AF_DLB_OFFSET] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 3,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 30,
+ .width = 68,
+ },
+ [VCAP_AF_RSDX_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 98,
+ .width = 1,
+ },
+ [VCAP_AF_RSDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 12,
+ },
+ [VCAP_AF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 2,
+ },
+ [VCAP_AF_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 11,
+ },
+ [VCAP_AF_TTL_UPDATE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 124,
+ .width = 1,
+ },
+ [VCAP_AF_SAM_SEQ_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 125,
+ .width = 1,
+ },
+ [VCAP_AF_TCP_UDP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 126,
+ .width = 1,
+ },
+ [VCAP_AF_TCP_UDP_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 16,
+ },
+ [VCAP_AF_TCP_UDP_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 143,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 16,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 191,
+ .width = 12,
+ },
+ [VCAP_AF_SWAP_MAC_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 203,
+ .width = 1,
+ },
+ [VCAP_AF_ACL_RT_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 204,
+ .width = 4,
+ },
+ [VCAP_AF_ACL_MAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 208,
+ .width = 48,
+ },
+ [VCAP_AF_DMAC_OFFSET_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 256,
+ .width = 1,
+ },
+ [VCAP_AF_PTP_MASTER_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 257,
+ .width = 2,
+ },
+ [VCAP_AF_LOG_MSG_INTERVAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 259,
+ .width = 4,
+ },
+ [VCAP_AF_SIP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 263,
+ .width = 5,
+ },
+ [VCAP_AF_RLEG_STAT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 268,
+ .width = 3,
+ },
+ [VCAP_AF_IGR_ACL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 271,
+ .width = 1,
+ },
+ [VCAP_AF_EGR_ACL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 272,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 16,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 7,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 34,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 11,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_MLBS] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_MLBS_REDUCED] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_MLBS] = is0_mlbs_actionfield,
+ [VCAP_AFS_MLBS_REDUCED] = is0_mlbs_reduced_actionfield,
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
+/* actionfield_set map size */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_MLBS] = ARRAY_SIZE(is0_mlbs_actionfield),
+ [VCAP_AFS_MLBS_REDUCED] = ARRAY_SIZE(is0_mlbs_reduced_actionfield),
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
+/* Type Groups */
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 52,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [12] = is2_x12_keyfield_set_typegroups,
+ [6] = is2_x6_keyfield_set_typegroups,
+ [3] = is2_x3_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 110,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 21,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 42,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [3] = is2_x3_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MLL] = "VCAP_KFS_MLL",
+ [VCAP_KFS_NORMAL] = "VCAP_KFS_NORMAL",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_TRI_VID] = "VCAP_KFS_TRI_VID",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_MLBS] = "VCAP_AFS_MLBS",
+ [VCAP_AFS_MLBS_REDUCED] = "VCAP_AFS_MLBS_REDUCED",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_DST_ENTRY] = "DST_ENTRY",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_ETYPE_MPLS] = "ETYPE_MPLS",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DMAC_DIP_MATCH] = "L3_DMAC_DIP_MATCH",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_SMAC_SIP_MATCH] = "L3_SMAC_SIP_MATCH",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_MAC] = "ACL_MAC",
+ [VCAP_AF_ACL_RT_MODE] = "ACL_RT_MODE",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_COSID_ENA] = "COSID_ENA",
+ [VCAP_AF_COSID_VAL] = "COSID_VAL",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_DIS] = "CPU_DIS",
+ [VCAP_AF_CPU_ENA] = "CPU_ENA",
+ [VCAP_AF_CPU_Q] = "CPU_Q",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_CUSTOM_ACE_ENA] = "CUSTOM_ACE_ENA",
+ [VCAP_AF_CUSTOM_ACE_OFFSET] = "CUSTOM_ACE_OFFSET",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DLB_OFFSET] = "DLB_OFFSET",
+ [VCAP_AF_DMAC_OFFSET_ENA] = "DMAC_OFFSET_ENA",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_EGR_ACL_ENA] = "EGR_ACL_ENA",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_FWD_DIS] = "FWD_DIS",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_TYPE] = "FWD_TYPE",
+ [VCAP_AF_GVID_ADD_REPLACE_SEL] = "GVID_ADD_REPLACE_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_IGR_ACL_ENA] = "IGR_ACL_ENA",
+ [VCAP_AF_INJ_MASQ_ENA] = "INJ_MASQ_ENA",
+ [VCAP_AF_INJ_MASQ_LPORT] = "INJ_MASQ_LPORT",
+ [VCAP_AF_INJ_MASQ_PORT] = "INJ_MASQ_PORT",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_IS_INNER_ACL] = "IS_INNER_ACL",
+ [VCAP_AF_L3_MAC_UPDATE_DIS] = "L3_MAC_UPDATE_DIS",
+ [VCAP_AF_LOG_MSG_INTERVAL] = "LOG_MSG_INTERVAL",
+ [VCAP_AF_LPM_AFFIX_ENA] = "LPM_AFFIX_ENA",
+ [VCAP_AF_LPM_AFFIX_VAL] = "LPM_AFFIX_VAL",
+ [VCAP_AF_LPORT_ENA] = "LPORT_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIP_SEL] = "MIP_SEL",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_MPLS_IP_CTRL_ENA] = "MPLS_IP_CTRL_ENA",
+ [VCAP_AF_MPLS_MEP_ENA] = "MPLS_MEP_ENA",
+ [VCAP_AF_MPLS_MIP_ENA] = "MPLS_MIP_ENA",
+ [VCAP_AF_MPLS_OAM_FLAVOR] = "MPLS_OAM_FLAVOR",
+ [VCAP_AF_MPLS_OAM_TYPE] = "MPLS_OAM_TYPE",
+ [VCAP_AF_NUM_VLD_LABELS] = "NUM_VLD_LABELS",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_NXT_KEY_TYPE] = "NXT_KEY_TYPE",
+ [VCAP_AF_NXT_NORMALIZE] = "NXT_NORMALIZE",
+ [VCAP_AF_NXT_NORM_W16_OFFSET] = "NXT_NORM_W16_OFFSET",
+ [VCAP_AF_NXT_NORM_W32_OFFSET] = "NXT_NORM_W32_OFFSET",
+ [VCAP_AF_NXT_OFFSET_FROM_TYPE] = "NXT_OFFSET_FROM_TYPE",
+ [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = "NXT_TYPE_AFTER_OFFSET",
+ [VCAP_AF_OAM_IP_BFD_ENA] = "OAM_IP_BFD_ENA",
+ [VCAP_AF_OAM_TWAMP_ENA] = "OAM_TWAMP_ENA",
+ [VCAP_AF_OAM_Y1731_SEL] = "OAM_Y1731_SEL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT_SEL] = "PIPELINE_ACT_SEL",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_PIPELINE_PT_REDUCED] = "PIPELINE_PT_REDUCED",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PTP_MASTER_SEL] = "PTP_MASTER_SEL",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_CMD] = "REW_CMD",
+ [VCAP_AF_RLEG_DMAC_CHK_DIS] = "RLEG_DMAC_CHK_DIS",
+ [VCAP_AF_RLEG_STAT_IDX] = "RLEG_STAT_IDX",
+ [VCAP_AF_RSDX_ENA] = "RSDX_ENA",
+ [VCAP_AF_RSDX_VAL] = "RSDX_VAL",
+ [VCAP_AF_RSVD_LBL_VAL] = "RSVD_LBL_VAL",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_RT_SEL] = "RT_SEL",
+ [VCAP_AF_S2_KEY_SEL_ENA] = "S2_KEY_SEL_ENA",
+ [VCAP_AF_S2_KEY_SEL_IDX] = "S2_KEY_SEL_IDX",
+ [VCAP_AF_SAM_SEQ_ENA] = "SAM_SEQ_ENA",
+ [VCAP_AF_SIP_IDX] = "SIP_IDX",
+ [VCAP_AF_SWAP_MAC_ENA] = "SWAP_MAC_ENA",
+ [VCAP_AF_TCP_UDP_DPORT] = "TCP_UDP_DPORT",
+ [VCAP_AF_TCP_UDP_ENA] = "TCP_UDP_ENA",
+ [VCAP_AF_TCP_UDP_SPORT] = "TCP_UDP_SPORT",
+ [VCAP_AF_TC_ENA] = "TC_ENA",
+ [VCAP_AF_TC_LABEL] = "TC_LABEL",
+ [VCAP_AF_TPID_SEL] = "TPID_SEL",
+ [VCAP_AF_TTL_DECR_DIS] = "TTL_DECR_DIS",
+ [VCAP_AF_TTL_ENA] = "TTL_ENA",
+ [VCAP_AF_TTL_LABEL] = "TTL_LABEL",
+ [VCAP_AF_TTL_UPDATE_ENA] = "TTL_UPDATE_ENA",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+ [VCAP_AF_VLAN_POP_CNT] = "VLAN_POP_CNT",
+ [VCAP_AF_VLAN_POP_CNT_ENA] = "VLAN_POP_CNT_ENA",
+ [VCAP_AF_VLAN_PUSH_CNT] = "VLAN_PUSH_CNT",
+ [VCAP_AF_VLAN_PUSH_CNT_ENA] = "VLAN_PUSH_CNT_ENA",
+ [VCAP_AF_VLAN_WAS_TAGGED] = "VLAN_WAS_TAGGED",
+};
+
+/* VCAPs */
+const struct vcap_info kunit_test_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 140,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 73,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 21,
+ .default_cnt = 74,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics kunit_test_vcap_stats = {
+ .name = "kunit_test",
+ .count = 3,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
new file mode 100644
index 000000000000..b5a74f0eef9b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP test model interface for kunit testing
+ */
+
+#ifndef __VCAP_MODEL_KUNIT_H__
+#define __VCAP_MODEL_KUNIT_H__
+extern const struct vcap_info kunit_test_vcaps[];
+extern const struct vcap_statistics kunit_test_vcap_stats;
+#endif /* __VCAP_MODEL_KUNIT_H__ */
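
Aside on the typegroup tables above (illustrative, not part of the patch): every typegroup entry starts on a subword boundary, so its .offset is a multiple of the subword width declared later in kunit_test_vcaps — 52 bits on the key side for all three VCAPs, and 110/110/21 bits on the action side for is0/is2/es2. A minimal sketch of one way to sanity-check such a table; the struct vcap_typegroup members are the ones used above, and the helper name is invented here.

static bool vcap_tg_offsets_aligned(const struct vcap_typegroup *tg,
                                    int subword_width)
{
        /* Tables end with an all-zero terminator entry, so stop at width 0 */
        for (; tg->width; tg++)
                if (tg->offset % subword_width)
                        return false;
        return true;
}

/* e.g. vcap_tg_offsets_aligned(is0_x12_keyfield_set_typegroups, 52) holds,
 * as does vcap_tg_offsets_aligned(es2_x3_actionfield_set_typegroups, 21).
 */
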
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index fe4e7a7d9c0b..090e6b983243 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -19,6 +19,7 @@ config MICROSOFT_MANA
tristate "Microsoft Azure Network Adapter (MANA) support"
depends on PCI_MSI && X86_64
depends on PCI_HYPERV
+ select AUXILIARY_BUS
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
deleted file mode 100644
index 65c24ee49efd..000000000000
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ /dev/null
@@ -1,692 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright (c) 2021, Microsoft Corporation. */
-
-#ifndef _GDMA_H
-#define _GDMA_H
-
-#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-
-#include "shm_channel.h"
-
-/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
- * them are naturally aligned and hence don't need __packed.
- */
-
-enum gdma_request_type {
- GDMA_VERIFY_VF_DRIVER_VERSION = 1,
- GDMA_QUERY_MAX_RESOURCES = 2,
- GDMA_LIST_DEVICES = 3,
- GDMA_REGISTER_DEVICE = 4,
- GDMA_DEREGISTER_DEVICE = 5,
- GDMA_GENERATE_TEST_EQE = 10,
- GDMA_CREATE_QUEUE = 12,
- GDMA_DISABLE_QUEUE = 13,
- GDMA_CREATE_DMA_REGION = 25,
- GDMA_DMA_REGION_ADD_PAGES = 26,
- GDMA_DESTROY_DMA_REGION = 27,
-};
-
-enum gdma_queue_type {
- GDMA_INVALID_QUEUE,
- GDMA_SQ,
- GDMA_RQ,
- GDMA_CQ,
- GDMA_EQ,
-};
-
-enum gdma_work_request_flags {
- GDMA_WR_NONE = 0,
- GDMA_WR_OOB_IN_SGL = BIT(0),
- GDMA_WR_PAD_BY_SGE0 = BIT(1),
-};
-
-enum gdma_eqe_type {
- GDMA_EQE_COMPLETION = 3,
- GDMA_EQE_TEST_EVENT = 64,
- GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
- GDMA_EQE_HWC_INIT_DATA = 130,
- GDMA_EQE_HWC_INIT_DONE = 131,
-};
-
-enum {
- GDMA_DEVICE_NONE = 0,
- GDMA_DEVICE_HWC = 1,
- GDMA_DEVICE_MANA = 2,
-};
-
-struct gdma_resource {
- /* Protect the bitmap */
- spinlock_t lock;
-
- /* The bitmap size in bits. */
- u32 size;
-
- /* The bitmap tracks the resources. */
- unsigned long *map;
-};
-
-union gdma_doorbell_entry {
- u64 as_uint64;
-
- struct {
- u64 id : 24;
- u64 reserved : 8;
- u64 tail_ptr : 31;
- u64 arm : 1;
- } cq;
-
- struct {
- u64 id : 24;
- u64 wqe_cnt : 8;
- u64 tail_ptr : 32;
- } rq;
-
- struct {
- u64 id : 24;
- u64 reserved : 8;
- u64 tail_ptr : 32;
- } sq;
-
- struct {
- u64 id : 16;
- u64 reserved : 16;
- u64 tail_ptr : 31;
- u64 arm : 1;
- } eq;
-}; /* HW DATA */
-
-struct gdma_msg_hdr {
- u32 hdr_type;
- u32 msg_type;
- u16 msg_version;
- u16 hwc_msg_id;
- u32 msg_size;
-}; /* HW DATA */
-
-struct gdma_dev_id {
- union {
- struct {
- u16 type;
- u16 instance;
- };
-
- u32 as_uint32;
- };
-}; /* HW DATA */
-
-struct gdma_req_hdr {
- struct gdma_msg_hdr req;
- struct gdma_msg_hdr resp; /* The expected response */
- struct gdma_dev_id dev_id;
- u32 activity_id;
-}; /* HW DATA */
-
-struct gdma_resp_hdr {
- struct gdma_msg_hdr response;
- struct gdma_dev_id dev_id;
- u32 activity_id;
- u32 status;
- u32 reserved;
-}; /* HW DATA */
-
-struct gdma_general_req {
- struct gdma_req_hdr hdr;
-}; /* HW DATA */
-
-#define GDMA_MESSAGE_V1 1
-
-struct gdma_general_resp {
- struct gdma_resp_hdr hdr;
-}; /* HW DATA */
-
-#define GDMA_STANDARD_HEADER_TYPE 0
-
-static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
- u32 req_size, u32 resp_size)
-{
- hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
- hdr->req.msg_type = code;
- hdr->req.msg_version = GDMA_MESSAGE_V1;
- hdr->req.msg_size = req_size;
-
- hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
- hdr->resp.msg_type = code;
- hdr->resp.msg_version = GDMA_MESSAGE_V1;
- hdr->resp.msg_size = resp_size;
-}
-
-/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
-struct gdma_sge {
- u64 address;
- u32 mem_key;
- u32 size;
-}; /* HW DATA */
-
-struct gdma_wqe_request {
- struct gdma_sge *sgl;
- u32 num_sge;
-
- u32 inline_oob_size;
- const void *inline_oob_data;
-
- u32 flags;
- u32 client_data_unit;
-};
-
-enum gdma_page_type {
- GDMA_PAGE_TYPE_4K,
-};
-
-#define GDMA_INVALID_DMA_REGION 0
-
-struct gdma_mem_info {
- struct device *dev;
-
- dma_addr_t dma_handle;
- void *virt_addr;
- u64 length;
-
- /* Allocated by the PF driver */
- u64 gdma_region;
-};
-
-#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
-
-struct gdma_dev {
- struct gdma_context *gdma_context;
-
- struct gdma_dev_id dev_id;
-
- u32 pdid;
- u32 doorbell;
- u32 gpa_mkey;
-
- /* GDMA driver specific pointer */
- void *driver_data;
-};
-
-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
-
-#define GDMA_CQE_SIZE 64
-#define GDMA_EQE_SIZE 16
-#define GDMA_MAX_SQE_SIZE 512
-#define GDMA_MAX_RQE_SIZE 256
-
-#define GDMA_COMP_DATA_SIZE 0x3C
-
-#define GDMA_EVENT_DATA_SIZE 0xC
-
-/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
-#define GDMA_WQE_BU_SIZE 32
-
-#define INVALID_PDID UINT_MAX
-#define INVALID_DOORBELL UINT_MAX
-#define INVALID_MEM_KEY UINT_MAX
-#define INVALID_QUEUE_ID UINT_MAX
-#define INVALID_PCI_MSIX_INDEX UINT_MAX
-
-struct gdma_comp {
- u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
- u32 wq_num;
- bool is_sq;
-};
-
-struct gdma_event {
- u32 details[GDMA_EVENT_DATA_SIZE / 4];
- u8 type;
-};
-
-struct gdma_queue;
-
-struct mana_eq {
- struct gdma_queue *eq;
-};
-
-typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
- struct gdma_event *e);
-
-typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
-
-/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
- * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
- * driver increases the 'head' in BUs rather than in bytes, and notifies
- * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
- * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
- *
- * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
- * processed, the driver increases the 'tail' to indicate that WQEs have
- * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
- *
- * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
- * that the EQ/CQ is big enough so they can't overflow, and the driver uses
- * the owner bits mechanism to detect if the queue has become empty.
- */
-struct gdma_queue {
- struct gdma_dev *gdma_dev;
-
- enum gdma_queue_type type;
- u32 id;
-
- struct gdma_mem_info mem_info;
-
- void *queue_mem_ptr;
- u32 queue_size;
-
- bool monitor_avl_buf;
-
- u32 head;
- u32 tail;
-
- /* Extra fields specific to EQ/CQ. */
- union {
- struct {
- bool disable_needed;
-
- gdma_eq_callback *callback;
- void *context;
-
- unsigned int msix_index;
-
- u32 log2_throttle_limit;
- } eq;
-
- struct {
- gdma_cq_callback *callback;
- void *context;
-
- struct gdma_queue *parent; /* For CQ/EQ relationship */
- } cq;
- };
-};
-
-struct gdma_queue_spec {
- enum gdma_queue_type type;
- bool monitor_avl_buf;
- unsigned int queue_size;
-
- /* Extra fields specific to EQ/CQ. */
- union {
- struct {
- gdma_eq_callback *callback;
- void *context;
-
- unsigned long log2_throttle_limit;
- } eq;
-
- struct {
- gdma_cq_callback *callback;
- void *context;
-
- struct gdma_queue *parent_eq;
-
- } cq;
- };
-};
-
-struct gdma_irq_context {
- void (*handler)(void *arg);
- void *arg;
-};
-
-struct gdma_context {
- struct device *dev;
-
- /* Per-vPort max number of queues */
- unsigned int max_num_queues;
- unsigned int max_num_msix;
- unsigned int num_msix_usable;
- struct gdma_resource msix_resource;
- struct gdma_irq_context *irq_contexts;
-
- /* This maps a CQ index to the queue structure. */
- unsigned int max_num_cqs;
- struct gdma_queue **cq_table;
-
- /* Protect eq_test_event and test_event_eq_id */
- struct mutex eq_test_event_mutex;
- struct completion eq_test_event;
- u32 test_event_eq_id;
-
- bool is_pf;
- void __iomem *bar0_va;
- void __iomem *shm_base;
- void __iomem *db_page_base;
- u32 db_page_size;
-
- /* Shared memory chanenl (used to bootstrap HWC) */
- struct shm_channel shm_channel;
-
- /* Hardware communication channel (HWC) */
- struct gdma_dev hwc;
-
- /* Azure network adapter */
- struct gdma_dev mana;
-};
-
-#define MAX_NUM_GDMA_DEVICES 4
-
-static inline bool mana_gd_is_mana(struct gdma_dev *gd)
-{
- return gd->dev_id.type == GDMA_DEVICE_MANA;
-}
-
-static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
-{
- return gd->dev_id.type == GDMA_DEVICE_HWC;
-}
-
-u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
-u32 mana_gd_wq_avail_space(struct gdma_queue *wq);
-
-int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);
-
-int mana_gd_create_hwc_queue(struct gdma_dev *gd,
- const struct gdma_queue_spec *spec,
- struct gdma_queue **queue_ptr);
-
-int mana_gd_create_mana_eq(struct gdma_dev *gd,
- const struct gdma_queue_spec *spec,
- struct gdma_queue **queue_ptr);
-
-int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
- const struct gdma_queue_spec *spec,
- struct gdma_queue **queue_ptr);
-
-void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
-
-int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
-
-void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
-
-struct gdma_wqe {
- u32 reserved :24;
- u32 last_vbytes :8;
-
- union {
- u32 flags;
-
- struct {
- u32 num_sge :8;
- u32 inline_oob_size_div4:3;
- u32 client_oob_in_sgl :1;
- u32 reserved1 :4;
- u32 client_data_unit :14;
- u32 reserved2 :2;
- };
- };
-}; /* HW DATA */
-
-#define INLINE_OOB_SMALL_SIZE 8
-#define INLINE_OOB_LARGE_SIZE 24
-
-#define MAX_TX_WQE_SIZE 512
-#define MAX_RX_WQE_SIZE 256
-
-struct gdma_cqe {
- u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
-
- union {
- u32 as_uint32;
-
- struct {
- u32 wq_num : 24;
- u32 is_sq : 1;
- u32 reserved : 4;
- u32 owner_bits : 3;
- };
- } cqe_info;
-}; /* HW DATA */
-
-#define GDMA_CQE_OWNER_BITS 3
-
-#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
-
-#define SET_ARM_BIT 1
-
-#define GDMA_EQE_OWNER_BITS 3
-
-union gdma_eqe_info {
- u32 as_uint32;
-
- struct {
- u32 type : 8;
- u32 reserved1 : 8;
- u32 client_id : 2;
- u32 reserved2 : 11;
- u32 owner_bits : 3;
- };
-}; /* HW DATA */
-
-#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
-#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
-
-struct gdma_eqe {
- u32 details[GDMA_EVENT_DATA_SIZE / 4];
- u32 eqe_info;
-}; /* HW DATA */
-
-#define GDMA_REG_DB_PAGE_OFFSET 8
-#define GDMA_REG_DB_PAGE_SIZE 0x10
-#define GDMA_REG_SHM_OFFSET 0x18
-
-#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
-#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
-#define GDMA_PF_REG_SHM_OFF 0x70
-
-#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108
-
-#define MANA_PF_DEVICE_ID 0x00B9
-#define MANA_VF_DEVICE_ID 0x00BA
-
-struct gdma_posted_wqe_info {
- u32 wqe_size_in_bu;
-};
-
-/* GDMA_GENERATE_TEST_EQE */
-struct gdma_generate_test_event_req {
- struct gdma_req_hdr hdr;
- u32 queue_index;
-}; /* HW DATA */
-
-/* GDMA_VERIFY_VF_DRIVER_VERSION */
-enum {
- GDMA_PROTOCOL_V1 = 1,
- GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
- GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
-};
-
-#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
-
-/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
- * so the driver is able to reliably support features like busy_poll.
- */
-#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
-
-#define GDMA_DRV_CAP_FLAGS1 \
- (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
- GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
-
-#define GDMA_DRV_CAP_FLAGS2 0
-
-#define GDMA_DRV_CAP_FLAGS3 0
-
-#define GDMA_DRV_CAP_FLAGS4 0
-
-struct gdma_verify_ver_req {
- struct gdma_req_hdr hdr;
-
- /* Mandatory fields required for protocol establishment */
- u64 protocol_ver_min;
- u64 protocol_ver_max;
-
- /* Gdma Driver Capability Flags */
- u64 gd_drv_cap_flags1;
- u64 gd_drv_cap_flags2;
- u64 gd_drv_cap_flags3;
- u64 gd_drv_cap_flags4;
-
- /* Advisory fields */
- u64 drv_ver;
- u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
- u32 reserved;
- u32 os_ver_major;
- u32 os_ver_minor;
- u32 os_ver_build;
- u32 os_ver_platform;
- u64 reserved_2;
- u8 os_ver_str1[128];
- u8 os_ver_str2[128];
- u8 os_ver_str3[128];
- u8 os_ver_str4[128];
-}; /* HW DATA */
-
-struct gdma_verify_ver_resp {
- struct gdma_resp_hdr hdr;
- u64 gdma_protocol_ver;
- u64 pf_cap_flags1;
- u64 pf_cap_flags2;
- u64 pf_cap_flags3;
- u64 pf_cap_flags4;
-}; /* HW DATA */
-
-/* GDMA_QUERY_MAX_RESOURCES */
-struct gdma_query_max_resources_resp {
- struct gdma_resp_hdr hdr;
- u32 status;
- u32 max_sq;
- u32 max_rq;
- u32 max_cq;
- u32 max_eq;
- u32 max_db;
- u32 max_mst;
- u32 max_cq_mod_ctx;
- u32 max_mod_cq;
- u32 max_msix;
-}; /* HW DATA */
-
-/* GDMA_LIST_DEVICES */
-struct gdma_list_devices_resp {
- struct gdma_resp_hdr hdr;
- u32 num_of_devs;
- u32 reserved;
- struct gdma_dev_id devs[64];
-}; /* HW DATA */
-
-/* GDMA_REGISTER_DEVICE */
-struct gdma_register_device_resp {
- struct gdma_resp_hdr hdr;
- u32 pdid;
- u32 gpa_mkey;
- u32 db_id;
-}; /* HW DATA */
-
-/* GDMA_CREATE_QUEUE */
-struct gdma_create_queue_req {
- struct gdma_req_hdr hdr;
- u32 type;
- u32 reserved1;
- u32 pdid;
- u32 doolbell_id;
- u64 gdma_region;
- u32 reserved2;
- u32 queue_size;
- u32 log2_throttle_limit;
- u32 eq_pci_msix_index;
- u32 cq_mod_ctx_id;
- u32 cq_parent_eq_id;
- u8 rq_drop_on_overrun;
- u8 rq_err_on_wqe_overflow;
- u8 rq_chain_rec_wqes;
- u8 sq_hw_db;
- u32 reserved3;
-}; /* HW DATA */
-
-struct gdma_create_queue_resp {
- struct gdma_resp_hdr hdr;
- u32 queue_index;
-}; /* HW DATA */
-
-/* GDMA_DISABLE_QUEUE */
-struct gdma_disable_queue_req {
- struct gdma_req_hdr hdr;
- u32 type;
- u32 queue_index;
- u32 alloc_res_id_on_creation;
-}; /* HW DATA */
-
-/* GDMA_CREATE_DMA_REGION */
-struct gdma_create_dma_region_req {
- struct gdma_req_hdr hdr;
-
- /* The total size of the DMA region */
- u64 length;
-
- /* The offset in the first page */
- u32 offset_in_page;
-
- /* enum gdma_page_type */
- u32 gdma_page_type;
-
- /* The total number of pages */
- u32 page_count;
-
- /* If page_addr_list_len is smaller than page_count,
- * the remaining page addresses will be added via the
- * message GDMA_DMA_REGION_ADD_PAGES.
- */
- u32 page_addr_list_len;
- u64 page_addr_list[];
-}; /* HW DATA */
-
-struct gdma_create_dma_region_resp {
- struct gdma_resp_hdr hdr;
- u64 gdma_region;
-}; /* HW DATA */
-
-/* GDMA_DMA_REGION_ADD_PAGES */
-struct gdma_dma_region_add_pages_req {
- struct gdma_req_hdr hdr;
-
- u64 gdma_region;
-
- u32 page_addr_list_len;
- u32 reserved3;
-
- u64 page_addr_list[];
-}; /* HW DATA */
-
-/* GDMA_DESTROY_DMA_REGION */
-struct gdma_destroy_dma_region_req {
- struct gdma_req_hdr hdr;
-
- u64 gdma_region;
-}; /* HW DATA */
-
-int mana_gd_verify_vf_version(struct pci_dev *pdev);
-
-int mana_gd_register_device(struct gdma_dev *gd);
-int mana_gd_deregister_device(struct gdma_dev *gd);
-
-int mana_gd_post_work_request(struct gdma_queue *wq,
- const struct gdma_wqe_request *wqe_req,
- struct gdma_posted_wqe_info *wqe_info);
-
-int mana_gd_post_and_ring(struct gdma_queue *queue,
- const struct gdma_wqe_request *wqe,
- struct gdma_posted_wqe_info *wqe_info);
-
-int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
-void mana_gd_free_res_map(struct gdma_resource *r);
-
-void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
- struct gdma_queue *queue);
-
-int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
- struct gdma_mem_info *gmi);
-
-void mana_gd_free_memory(struct gdma_mem_info *gmi);
-
-int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
- u32 resp_len, void *resp);
-#endif /* _GDMA_H */
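
Aside on the header removed above (this series moves it under include/net/mana/, as the #include changes below show): GDMA messages prime paired request/response headers with mana_gd_init_req_hdr() and are then sent synchronously over the hardware channel. A minimal sketch of that calling pattern, illustrative only and not part of the patch; it assumes a struct gdma_context *gc in scope inside a function, uses only identifiers declared in the header, and elides error handling.

        struct gdma_verify_ver_req req = {};
        struct gdma_verify_ver_resp resp = {};
        int err;

        /* Fill in the request header and the expected-response header */
        mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
                             sizeof(req), sizeof(resp));
        req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
        req.protocol_ver_max = GDMA_PROTOCOL_LAST;
        req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;

        /* Dispatched via the HW channel; resp.hdr.status carries the result */
        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
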
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index a6f99b4344d9..690b69cae4e3 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -6,7 +6,7 @@
#include <linux/utsname.h>
#include <linux/version.h>
-#include "mana.h"
+#include <net/mana/mana.h>
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
@@ -44,6 +44,9 @@ static void mana_gd_init_vf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
+ gc->phys_db_page_base = gc->bar0_pa +
+ mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
+
gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}
@@ -149,6 +152,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
+EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
struct gdma_mem_info *gmi)
@@ -194,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
req.type = queue->type;
req.pdid = queue->gdma_dev->pdid;
req.doolbell_id = queue->gdma_dev->doorbell;
- req.gdma_region = queue->mem_info.gdma_region;
+ req.gdma_region = queue->mem_info.dma_region_handle;
req.queue_size = queue->queue_size;
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
req.eq_pci_msix_index = queue->eq.msix_index;
@@ -208,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
queue->id = resp.queue_index;
queue->eq.disable_needed = true;
- queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
return 0;
}
@@ -667,24 +671,30 @@ free_q:
return err;
}
-static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+ gdma_obj_handle_t dma_region_handle)
{
struct gdma_destroy_dma_region_req req = {};
struct gdma_general_resp resp = {};
int err;
- if (gdma_region == GDMA_INVALID_DMA_REGION)
- return;
+ if (dma_region_handle == GDMA_INVALID_DMA_REGION)
+ return 0;
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
sizeof(resp));
- req.gdma_region = gdma_region;
+ req.dma_region_handle = dma_region_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
- if (err || resp.hdr.status)
+ if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
err, resp.hdr.status);
+ return -EPROTO;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
@@ -729,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
if (err)
goto out;
- if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+ if (resp.hdr.status ||
+ resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
resp.hdr.status);
err = -EPROTO;
goto out;
}
- gmi->gdma_region = resp.gdma_region;
+ gmi->dma_region_handle = resp.dma_region_handle;
out:
kfree(req);
return err;
@@ -859,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
return;
}
- mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+ mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
mana_gd_free_memory(gmi);
kfree(queue);
}
@@ -1208,8 +1219,10 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_irq_context *gic;
unsigned int max_irqs;
+ u16 *cpus;
+ cpumask_var_t req_mask;
int nvec, irq;
- int err, i, j;
+ int err, i = 0, j;
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1228,7 +1241,21 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq_vector;
}
+ if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
+ cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
+ if (!cpus) {
+ err = -ENOMEM;
+ goto free_mask;
+ }
+ for (i = 0; i < nvec; i++)
+ cpus[i] = cpumask_local_spread(i, gc->numa_node);
+
for (i = 0; i < nvec; i++) {
+ cpumask_set_cpu(cpus[i], req_mask);
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
@@ -1236,13 +1263,17 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
- goto free_irq;
+ goto free_mask;
}
err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
if (err)
- goto free_irq;
+ goto free_mask;
+ irq_set_affinity_and_hint(irq, req_mask);
+ cpumask_clear(req_mask);
}
+ free_cpumask_var(req_mask);
+ kfree(cpus);
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
@@ -1253,6 +1284,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
return 0;
+free_mask:
+ free_cpumask_var(req_mask);
+ kfree(cpus);
free_irq:
for (j = i - 1; j >= 0; j--) {
irq = pci_irq_vector(pdev, j);
@@ -1370,6 +1404,12 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto release_region;
+ err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set dma device segment size\n");
+ goto release_region;
+ }
+
err = -ENOMEM;
gc = vzalloc(sizeof(*gc));
if (!gc)
@@ -1377,11 +1417,13 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&gc->eq_test_event_mutex);
pci_set_drvdata(pdev, gc);
+ gc->bar0_pa = pci_resource_start(pdev, 0);
bar0_va = pci_iomap(pdev, bar, 0);
if (!bar0_va)
goto free_gc;
+ gc->numa_node = dev_to_node(&pdev->dev);
gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
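
Aside on the IRQ changes in the file above (illustrative, not part of the patch): the new code spreads the MSI-X vectors over CPUs local to the device's NUMA node and pins each vector with irq_set_affinity_and_hint(). A stripped-down sketch of that pattern; it assumes nvec and pdev are already set up inside a function that can return -ENOMEM, and it omits the bookkeeping and error unwinding the real hunk has.

        cpumask_var_t mask;
        int i, irq;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        for (i = 0; i < nvec; i++) {
                /* Pick the i-th CPU closest to the device's NUMA node */
                cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&pdev->dev)),
                                mask);
                irq = pci_irq_vector(pdev, i);
                irq_set_affinity_and_hint(irq, mask);
                cpumask_clear(mask);
        }
        free_cpumask_var(mask);
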
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 543a5d5c304f..9d1507eba5b9 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
-#include "gdma.h"
-#include "hw_channel.h"
+#include <net/mana/gdma.h>
+#include <net/mana/hw_channel.h>
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
@@ -836,7 +836,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
goto out;
}
- if (ctx->status_code) {
+ if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
ctx->status_code);
err = -EPROTO;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h
deleted file mode 100644
index 6a757a6e2732..000000000000
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright (c) 2021, Microsoft Corporation. */
-
-#ifndef _HW_CHANNEL_H
-#define _HW_CHANNEL_H
-
-#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4
-
-#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000
-#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000
-
-#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1
-
-#define HWC_INIT_DATA_CQID 1
-#define HWC_INIT_DATA_RQID 2
-#define HWC_INIT_DATA_SQID 3
-#define HWC_INIT_DATA_QUEUE_DEPTH 4
-#define HWC_INIT_DATA_MAX_REQUEST 5
-#define HWC_INIT_DATA_MAX_RESPONSE 6
-#define HWC_INIT_DATA_MAX_NUM_CQS 7
-#define HWC_INIT_DATA_PDID 8
-#define HWC_INIT_DATA_GPA_MKEY 9
-#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
-#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
-
-/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
- * them are naturally aligned and hence don't need __packed.
- */
-
-union hwc_init_eq_id_db {
- u32 as_uint32;
-
- struct {
- u32 eq_id : 16;
- u32 doorbell : 16;
- };
-}; /* HW DATA */
-
-union hwc_init_type_data {
- u32 as_uint32;
-
- struct {
- u32 value : 24;
- u32 type : 8;
- };
-}; /* HW DATA */
-
-struct hwc_rx_oob {
- u32 type : 6;
- u32 eom : 1;
- u32 som : 1;
- u32 vendor_err : 8;
- u32 reserved1 : 16;
-
- u32 src_virt_wq : 24;
- u32 src_vfid : 8;
-
- u32 reserved2;
-
- union {
- u32 wqe_addr_low;
- u32 wqe_offset;
- };
-
- u32 wqe_addr_high;
-
- u32 client_data_unit : 14;
- u32 reserved3 : 18;
-
- u32 tx_oob_data_size;
-
- u32 chunk_offset : 21;
- u32 reserved4 : 11;
-}; /* HW DATA */
-
-struct hwc_tx_oob {
- u32 reserved1;
-
- u32 reserved2;
-
- u32 vrq_id : 24;
- u32 dest_vfid : 8;
-
- u32 vrcq_id : 24;
- u32 reserved3 : 8;
-
- u32 vscq_id : 24;
- u32 loopback : 1;
- u32 lso_override: 1;
- u32 dest_pf : 1;
- u32 reserved4 : 5;
-
- u32 vsq_id : 24;
- u32 reserved5 : 8;
-}; /* HW DATA */
-
-struct hwc_work_request {
- void *buf_va;
- void *buf_sge_addr;
- u32 buf_len;
- u32 msg_size;
-
- struct gdma_wqe_request wqe_req;
- struct hwc_tx_oob tx_oob;
-
- struct gdma_sge sge;
-};
-
-/* hwc_dma_buf represents the array of in-flight WQEs.
- * mem_info as know as the GDMA mapped memory is partitioned and used by
- * in-flight WQEs.
- * The number of WQEs is determined by the number of in-flight messages.
- */
-struct hwc_dma_buf {
- struct gdma_mem_info mem_info;
-
- u32 gpa_mkey;
-
- u32 num_reqs;
- struct hwc_work_request reqs[];
-};
-
-typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
- const struct hwc_rx_oob *rx_oob);
-
-typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id,
- const struct hwc_rx_oob *rx_oob);
-
-struct hwc_cq {
- struct hw_channel_context *hwc;
-
- struct gdma_queue *gdma_cq;
- struct gdma_queue *gdma_eq;
- struct gdma_comp *comp_buf;
- u16 queue_depth;
-
- hwc_rx_event_handler_t *rx_event_handler;
- void *rx_event_ctx;
-
- hwc_tx_event_handler_t *tx_event_handler;
- void *tx_event_ctx;
-};
-
-struct hwc_wq {
- struct hw_channel_context *hwc;
-
- struct gdma_queue *gdma_wq;
- struct hwc_dma_buf *msg_buf;
- u16 queue_depth;
-
- struct hwc_cq *hwc_cq;
-};
-
-struct hwc_caller_ctx {
- struct completion comp_event;
- void *output_buf;
- u32 output_buflen;
-
- u32 error; /* Linux error code */
- u32 status_code;
-};
-
-struct hw_channel_context {
- struct gdma_dev *gdma_dev;
- struct device *dev;
-
- u16 num_inflight_msg;
- u32 max_req_msg_size;
-
- u16 hwc_init_q_depth_max;
- u32 hwc_init_max_req_msg_size;
- u32 hwc_init_max_resp_msg_size;
-
- struct completion hwc_init_eqe_comp;
-
- struct hwc_wq *rxq;
- struct hwc_wq *txq;
- struct hwc_cq *cq;
-
- struct semaphore sema;
- struct gdma_resource inflight_msg_res;
-
- u32 pf_dest_vrq_id;
- u32 pf_dest_vrcq_id;
-
- struct hwc_caller_ctx *caller_ctx;
-};
-
-int mana_hwc_create_channel(struct gdma_context *gc);
-void mana_hwc_destroy_channel(struct gdma_context *gc);
-
-int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
- const void *req, u32 resp_len, void *resp);
-
-#endif /* _HW_CHANNEL_H */
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
deleted file mode 100644
index d58be64374c8..000000000000
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ /dev/null
@@ -1,634 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright (c) 2021, Microsoft Corporation. */
-
-#ifndef _MANA_H
-#define _MANA_H
-
-#include "gdma.h"
-#include "hw_channel.h"
-
-/* Microsoft Azure Network Adapter (MANA)'s definitions
- *
- * Structures labeled with "HW DATA" are exchanged with the hardware. All of
- * them are naturally aligned and hence don't need __packed.
- */
-
-/* MANA protocol version */
-#define MANA_MAJOR_VERSION 0
-#define MANA_MINOR_VERSION 1
-#define MANA_MICRO_VERSION 1
-
-typedef u64 mana_handle_t;
-#define INVALID_MANA_HANDLE ((mana_handle_t)-1)
-
-enum TRI_STATE {
- TRI_STATE_UNKNOWN = -1,
- TRI_STATE_FALSE = 0,
- TRI_STATE_TRUE = 1
-};
-
-/* Number of entries for hardware indirection table must be in power of 2 */
-#define MANA_INDIRECT_TABLE_SIZE 64
-#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
-
-/* The Toeplitz hash key's length in bytes: should be multiple of 8 */
-#define MANA_HASH_KEY_SIZE 40
-
-#define COMP_ENTRY_SIZE 64
-
-#define ADAPTER_MTU_SIZE 1500
-#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
-
-#define RX_BUFFERS_PER_QUEUE 512
-
-#define MAX_SEND_BUFFERS_PER_QUEUE 256
-
-#define EQ_SIZE (8 * PAGE_SIZE)
-#define LOG2_EQ_THROTTLE 3
-
-#define MAX_PORTS_IN_MANA_DEV 256
-
-struct mana_stats_rx {
- u64 packets;
- u64 bytes;
- u64 xdp_drop;
- u64 xdp_tx;
- u64 xdp_redirect;
- struct u64_stats_sync syncp;
-};
-
-struct mana_stats_tx {
- u64 packets;
- u64 bytes;
- u64 xdp_xmit;
- struct u64_stats_sync syncp;
-};
-
-struct mana_txq {
- struct gdma_queue *gdma_sq;
-
- union {
- u32 gdma_txq_id;
- struct {
- u32 reserved1 : 10;
- u32 vsq_frame : 14;
- u32 reserved2 : 8;
- };
- };
-
- u16 vp_offset;
-
- struct net_device *ndev;
-
- /* The SKBs are sent to the HW and we are waiting for the CQEs. */
- struct sk_buff_head pending_skbs;
- struct netdev_queue *net_txq;
-
- atomic_t pending_sends;
-
- struct mana_stats_tx stats;
-};
-
-/* skb data and frags dma mappings */
-struct mana_skb_head {
- dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
-
- u32 size[MAX_SKB_FRAGS + 1];
-};
-
-#define MANA_HEADROOM sizeof(struct mana_skb_head)
-
-enum mana_tx_pkt_format {
- MANA_SHORT_PKT_FMT = 0,
- MANA_LONG_PKT_FMT = 1,
-};
-
-struct mana_tx_short_oob {
- u32 pkt_fmt : 2;
- u32 is_outer_ipv4 : 1;
- u32 is_outer_ipv6 : 1;
- u32 comp_iphdr_csum : 1;
- u32 comp_tcp_csum : 1;
- u32 comp_udp_csum : 1;
- u32 supress_txcqe_gen : 1;
- u32 vcq_num : 24;
-
- u32 trans_off : 10; /* Transport header offset */
- u32 vsq_frame : 14;
- u32 short_vp_offset : 8;
-}; /* HW DATA */
-
-struct mana_tx_long_oob {
- u32 is_encap : 1;
- u32 inner_is_ipv6 : 1;
- u32 inner_tcp_opt : 1;
- u32 inject_vlan_pri_tag : 1;
- u32 reserved1 : 12;
- u32 pcp : 3; /* 802.1Q */
- u32 dei : 1; /* 802.1Q */
- u32 vlan_id : 12; /* 802.1Q */
-
- u32 inner_frame_offset : 10;
- u32 inner_ip_rel_offset : 6;
- u32 long_vp_offset : 12;
- u32 reserved2 : 4;
-
- u32 reserved3;
- u32 reserved4;
-}; /* HW DATA */
-
-struct mana_tx_oob {
- struct mana_tx_short_oob s_oob;
- struct mana_tx_long_oob l_oob;
-}; /* HW DATA */
-
-enum mana_cq_type {
- MANA_CQ_TYPE_RX,
- MANA_CQ_TYPE_TX,
-};
-
-enum mana_cqe_type {
- CQE_INVALID = 0,
- CQE_RX_OKAY = 1,
- CQE_RX_COALESCED_4 = 2,
- CQE_RX_OBJECT_FENCE = 3,
- CQE_RX_TRUNCATED = 4,
-
- CQE_TX_OKAY = 32,
- CQE_TX_SA_DROP = 33,
- CQE_TX_MTU_DROP = 34,
- CQE_TX_INVALID_OOB = 35,
- CQE_TX_INVALID_ETH_TYPE = 36,
- CQE_TX_HDR_PROCESSING_ERROR = 37,
- CQE_TX_VF_DISABLED = 38,
- CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
- CQE_TX_VPORT_DISABLED = 40,
- CQE_TX_VLAN_TAGGING_VIOLATION = 41,
-};
-
-#define MANA_CQE_COMPLETION 1
-
-struct mana_cqe_header {
- u32 cqe_type : 6;
- u32 client_type : 2;
- u32 vendor_err : 24;
-}; /* HW DATA */
-
-/* NDIS HASH Types */
-#define NDIS_HASH_IPV4 BIT(0)
-#define NDIS_HASH_TCP_IPV4 BIT(1)
-#define NDIS_HASH_UDP_IPV4 BIT(2)
-#define NDIS_HASH_IPV6 BIT(3)
-#define NDIS_HASH_TCP_IPV6 BIT(4)
-#define NDIS_HASH_UDP_IPV6 BIT(5)
-#define NDIS_HASH_IPV6_EX BIT(6)
-#define NDIS_HASH_TCP_IPV6_EX BIT(7)
-#define NDIS_HASH_UDP_IPV6_EX BIT(8)
-
-#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
-#define MANA_HASH_L4 \
- (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
- NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
-
-struct mana_rxcomp_perpkt_info {
- u32 pkt_len : 16;
- u32 reserved1 : 16;
- u32 reserved2;
- u32 pkt_hash;
-}; /* HW DATA */
-
-#define MANA_RXCOMP_OOB_NUM_PPI 4
-
-/* Receive completion OOB */
-struct mana_rxcomp_oob {
- struct mana_cqe_header cqe_hdr;
-
- u32 rx_vlan_id : 12;
- u32 rx_vlantag_present : 1;
- u32 rx_outer_iphdr_csum_succeed : 1;
- u32 rx_outer_iphdr_csum_fail : 1;
- u32 reserved1 : 1;
- u32 rx_hashtype : 9;
- u32 rx_iphdr_csum_succeed : 1;
- u32 rx_iphdr_csum_fail : 1;
- u32 rx_tcp_csum_succeed : 1;
- u32 rx_tcp_csum_fail : 1;
- u32 rx_udp_csum_succeed : 1;
- u32 rx_udp_csum_fail : 1;
- u32 reserved2 : 1;
-
- struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];
-
- u32 rx_wqe_offset;
-}; /* HW DATA */
-
-struct mana_tx_comp_oob {
- struct mana_cqe_header cqe_hdr;
-
- u32 tx_data_offset;
-
- u32 tx_sgl_offset : 5;
- u32 tx_wqe_offset : 27;
-
- u32 reserved[12];
-}; /* HW DATA */
-
-struct mana_rxq;
-
-#define CQE_POLLING_BUFFER 512
-
-struct mana_cq {
- struct gdma_queue *gdma_cq;
-
- /* Cache the CQ id (used to verify if each CQE comes to the right CQ. */
- u32 gdma_id;
-
- /* Type of the CQ: TX or RX */
- enum mana_cq_type type;
-
- /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
- * Only and must be non-NULL if type is MANA_CQ_TYPE_RX.
- */
- struct mana_rxq *rxq;
-
- /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
- * Only and must be non-NULL if type is MANA_CQ_TYPE_TX.
- */
- struct mana_txq *txq;
-
- /* Buffer which the CQ handler can copy the CQE's into. */
- struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];
-
- /* NAPI data */
- struct napi_struct napi;
- int work_done;
- int budget;
-};
-
-#define GDMA_MAX_RQE_SGES 15
-
-struct mana_recv_buf_oob {
- /* A valid GDMA work request representing the data buffer. */
- struct gdma_wqe_request wqe_req;
-
- void *buf_va;
- dma_addr_t buf_dma_addr;
-
- /* SGL of the buffer going to be sent has part of the work request. */
- u32 num_sge;
- struct gdma_sge sgl[GDMA_MAX_RQE_SGES];
-
- /* Required to store the result of mana_gd_post_work_request.
- * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
- * work queue when the WQE is consumed.
- */
- struct gdma_posted_wqe_info wqe_inf;
-};
-
-struct mana_rxq {
- struct gdma_queue *gdma_rq;
- /* Cache the gdma receive queue id */
- u32 gdma_id;
-
- /* Index of RQ in the vPort, not gdma receive queue id */
- u32 rxq_idx;
-
- u32 datasize;
-
- mana_handle_t rxobj;
-
- struct mana_cq rx_cq;
-
- struct completion fence_event;
-
- struct net_device *ndev;
-
- /* Total number of receive buffers to be allocated */
- u32 num_rx_buf;
-
- u32 buf_index;
-
- struct mana_stats_rx stats;
-
- struct bpf_prog __rcu *bpf_prog;
- struct xdp_rxq_info xdp_rxq;
- struct page *xdp_save_page;
- bool xdp_flush;
- int xdp_rc; /* XDP redirect return code */
-
- /* MUST BE THE LAST MEMBER:
- * Each receive buffer has an associated mana_recv_buf_oob.
- */
- struct mana_recv_buf_oob rx_oobs[];
-};
-
-struct mana_tx_qp {
- struct mana_txq txq;
-
- struct mana_cq tx_cq;
-
- mana_handle_t tx_object;
-};
-
-struct mana_ethtool_stats {
- u64 stop_queue;
- u64 wake_queue;
-};
-
-struct mana_context {
- struct gdma_dev *gdma_dev;
-
- u16 num_ports;
-
- struct mana_eq *eqs;
-
- struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
-};
-
-struct mana_port_context {
- struct mana_context *ac;
- struct net_device *ndev;
-
- u8 mac_addr[ETH_ALEN];
-
- enum TRI_STATE rss_state;
-
- mana_handle_t default_rxobj;
- bool tx_shortform_allowed;
- u16 tx_vp_offset;
-
- struct mana_tx_qp *tx_qp;
-
- /* Indirection Table for RX & TX. The values are queue indexes */
- u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
-
- /* Indirection table containing RxObject Handles */
- mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
-
- /* Hash key used by the NIC */
- u8 hashkey[MANA_HASH_KEY_SIZE];
-
- /* This points to an array of num_queues of RQ pointers. */
- struct mana_rxq **rxqs;
-
- struct bpf_prog *bpf_prog;
-
- /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
- unsigned int max_queues;
- unsigned int num_queues;
-
- mana_handle_t port_handle;
- mana_handle_t pf_filter_handle;
-
- u16 port_idx;
-
- bool port_is_up;
- bool port_st_save; /* Saved port state */
-
- struct mana_ethtool_stats eth_stats;
-};
-
-int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
-int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
- bool update_hash, bool update_tab);
-
-int mana_alloc_queues(struct net_device *ndev);
-int mana_attach(struct net_device *ndev);
-int mana_detach(struct net_device *ndev, bool from_close);
-
-int mana_probe(struct gdma_dev *gd, bool resuming);
-void mana_remove(struct gdma_dev *gd, bool suspending);
-
-void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
-int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
- u32 flags);
-u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
- struct xdp_buff *xdp, void *buf_va, uint pkt_len);
-struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
-void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
-int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
-
-extern const struct ethtool_ops mana_ethtool_ops;
-
-struct mana_obj_spec {
- u32 queue_index;
- u64 gdma_region;
- u32 queue_size;
- u32 attached_eq;
- u32 modr_ctx_id;
-};
-
-enum mana_command_code {
- MANA_QUERY_DEV_CONFIG = 0x20001,
- MANA_QUERY_GF_STAT = 0x20002,
- MANA_CONFIG_VPORT_TX = 0x20003,
- MANA_CREATE_WQ_OBJ = 0x20004,
- MANA_DESTROY_WQ_OBJ = 0x20005,
- MANA_FENCE_RQ = 0x20006,
- MANA_CONFIG_VPORT_RX = 0x20007,
- MANA_QUERY_VPORT_CONFIG = 0x20008,
-
- /* Privileged commands for the PF mode */
- MANA_REGISTER_FILTER = 0x28000,
- MANA_DEREGISTER_FILTER = 0x28001,
- MANA_REGISTER_HW_PORT = 0x28003,
- MANA_DEREGISTER_HW_PORT = 0x28004,
-};
-
-/* Query Device Configuration */
-struct mana_query_device_cfg_req {
- struct gdma_req_hdr hdr;
-
- /* MANA Nic Driver Capability flags */
- u64 mn_drv_cap_flags1;
- u64 mn_drv_cap_flags2;
- u64 mn_drv_cap_flags3;
- u64 mn_drv_cap_flags4;
-
- u32 proto_major_ver;
- u32 proto_minor_ver;
- u32 proto_micro_ver;
-
- u32 reserved;
-}; /* HW DATA */
-
-struct mana_query_device_cfg_resp {
- struct gdma_resp_hdr hdr;
-
- u64 pf_cap_flags1;
- u64 pf_cap_flags2;
- u64 pf_cap_flags3;
- u64 pf_cap_flags4;
-
- u16 max_num_vports;
- u16 reserved;
- u32 max_num_eqs;
-}; /* HW DATA */
-
-/* Query vPort Configuration */
-struct mana_query_vport_cfg_req {
- struct gdma_req_hdr hdr;
- u32 vport_index;
-}; /* HW DATA */
-
-struct mana_query_vport_cfg_resp {
- struct gdma_resp_hdr hdr;
- u32 max_num_sq;
- u32 max_num_rq;
- u32 num_indirection_ent;
- u32 reserved1;
- u8 mac_addr[6];
- u8 reserved2[2];
- mana_handle_t vport;
-}; /* HW DATA */
-
-/* Configure vPort */
-struct mana_config_vport_req {
- struct gdma_req_hdr hdr;
- mana_handle_t vport;
- u32 pdid;
- u32 doorbell_pageid;
-}; /* HW DATA */
-
-struct mana_config_vport_resp {
- struct gdma_resp_hdr hdr;
- u16 tx_vport_offset;
- u8 short_form_allowed;
- u8 reserved;
-}; /* HW DATA */
-
-/* Create WQ Object */
-struct mana_create_wqobj_req {
- struct gdma_req_hdr hdr;
- mana_handle_t vport;
- u32 wq_type;
- u32 reserved;
- u64 wq_gdma_region;
- u64 cq_gdma_region;
- u32 wq_size;
- u32 cq_size;
- u32 cq_moderation_ctx_id;
- u32 cq_parent_qid;
-}; /* HW DATA */
-
-struct mana_create_wqobj_resp {
- struct gdma_resp_hdr hdr;
- u32 wq_id;
- u32 cq_id;
- mana_handle_t wq_obj;
-}; /* HW DATA */
-
-/* Destroy WQ Object */
-struct mana_destroy_wqobj_req {
- struct gdma_req_hdr hdr;
- u32 wq_type;
- u32 reserved;
- mana_handle_t wq_obj_handle;
-}; /* HW DATA */
-
-struct mana_destroy_wqobj_resp {
- struct gdma_resp_hdr hdr;
-}; /* HW DATA */
-
-/* Fence RQ */
-struct mana_fence_rq_req {
- struct gdma_req_hdr hdr;
- mana_handle_t wq_obj_handle;
-}; /* HW DATA */
-
-struct mana_fence_rq_resp {
- struct gdma_resp_hdr hdr;
-}; /* HW DATA */
-
-/* Configure vPort Rx Steering */
-struct mana_cfg_rx_steer_req {
- struct gdma_req_hdr hdr;
- mana_handle_t vport;
- u16 num_indir_entries;
- u16 indir_tab_offset;
- u32 rx_enable;
- u32 rss_enable;
- u8 update_default_rxobj;
- u8 update_hashkey;
- u8 update_indir_tab;
- u8 reserved;
- mana_handle_t default_rxobj;
- u8 hashkey[MANA_HASH_KEY_SIZE];
-}; /* HW DATA */
-
-struct mana_cfg_rx_steer_resp {
- struct gdma_resp_hdr hdr;
-}; /* HW DATA */
-
-/* Register HW vPort */
-struct mana_register_hw_vport_req {
- struct gdma_req_hdr hdr;
- u16 attached_gfid;
- u8 is_pf_default_vport;
- u8 reserved1;
- u8 allow_all_ether_types;
- u8 reserved2;
- u8 reserved3;
- u8 reserved4;
-}; /* HW DATA */
-
-struct mana_register_hw_vport_resp {
- struct gdma_resp_hdr hdr;
- mana_handle_t hw_vport_handle;
-}; /* HW DATA */
-
-/* Deregister HW vPort */
-struct mana_deregister_hw_vport_req {
- struct gdma_req_hdr hdr;
- mana_handle_t hw_vport_handle;
-}; /* HW DATA */
-
-struct mana_deregister_hw_vport_resp {
- struct gdma_resp_hdr hdr;
-}; /* HW DATA */
-
-/* Register filter */
-struct mana_register_filter_req {
- struct gdma_req_hdr hdr;
- mana_handle_t vport;
- u8 mac_addr[6];
- u8 reserved1;
- u8 reserved2;
- u8 reserved3;
- u8 reserved4;
- u16 reserved5;
- u32 reserved6;
- u32 reserved7;
- u32 reserved8;
-}; /* HW DATA */
-
-struct mana_register_filter_resp {
- struct gdma_resp_hdr hdr;
- mana_handle_t filter_handle;
-}; /* HW DATA */
-
-/* Deregister filter */
-struct mana_deregister_filter_req {
- struct gdma_req_hdr hdr;
- mana_handle_t filter_handle;
-}; /* HW DATA */
-
-struct mana_deregister_filter_resp {
- struct gdma_resp_hdr hdr;
-}; /* HW DATA */
-
-#define MANA_MAX_NUM_QUEUES 64
-
-#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
-
-struct mana_tx_package {
- struct gdma_wqe_request wqe_req;
- struct gdma_sge sgl_array[5];
- struct gdma_sge *sgl_ptr;
-
- struct mana_tx_oob tx_oob;
-
- struct gdma_posted_wqe_info wqe_info;
-};
-
-#endif /* _MANA_H */
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 421fd39ff3a8..3caea631229c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -8,7 +8,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp.h>
-#include "mana.h"
+#include <net/mana/mana.h>
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 27a0f3af8aab..2f6a048dee90 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -12,7 +12,20 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
-#include "mana.h"
+#include <net/mana/mana.h>
+#include <net/mana/mana_auxiliary.h>
+
+static DEFINE_IDA(mana_adev_ida);
+
+static int mana_adev_idx_alloc(void)
+{
+ return ida_alloc(&mana_adev_ida, GFP_KERNEL);
+}
+
+static void mana_adev_idx_free(int idx)
+{
+ ida_free(&mana_adev_ida, idx);
+}
/* Microsoft Azure Network Adapter (MANA) functions */
@@ -128,7 +141,7 @@ frag_err:
return -ENOMEM;
}
-int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
struct mana_port_context *apc = netdev_priv(ndev);
@@ -176,7 +189,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pkg.wqe_req.client_data_unit = 0;
pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
- WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
+ WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
pkg.wqe_req.sgl = pkg.sgl_array;
@@ -315,10 +328,10 @@ static void mana_get_stats64(struct net_device *ndev,
rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
st->rx_packets += packets;
st->rx_bytes += bytes;
@@ -328,10 +341,10 @@ static void mana_get_stats64(struct net_device *ndev,
tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
st->tx_packets += packets;
st->tx_bytes += bytes;
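
These hunks are part of the tree-wide move away from the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() helpers. For context, the fetch loops above pair with producers that bracket their updates on the same syncp; a generic writer-side sketch (not code from this patch, variable names borrowed from the surrounding hunk):

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);
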
@@ -633,13 +646,48 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
return 0;
}
-static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
- u32 doorbell_pg_id)
+void mana_uncfg_vport(struct mana_port_context *apc)
+{
+ mutex_lock(&apc->vport_mutex);
+ apc->vport_use_count--;
+ WARN_ON(apc->vport_use_count < 0);
+ mutex_unlock(&apc->vport_mutex);
+}
+EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
+
+int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
+ u32 doorbell_pg_id)
{
struct mana_config_vport_resp resp = {};
struct mana_config_vport_req req = {};
int err;
+ /* This function is used to program the Ethernet port in the hardware
+ * table. It can be called from the Ethernet driver or the RDMA driver.
+ *
+ * For Ethernet usage, the hardware supports only one active user on a
+ * physical port. The driver checks the port usage before programming
+ * the hardware when creating the RAW QP (RDMA driver) or exposing the
+ * device to the kernel NET layer (Ethernet driver).
+ *
+ * Because the RDMA driver doesn't know in advance which QP type the
+ * user will create, it exposes the device with all its ports. The user
+ * may not be able to create a RAW QP on a port if this port is already
+ * in use by the kernel Ethernet driver.
+ *
+ * This physical port limitation only applies to the RAW QP. For RC QP,
+ * the hardware doesn't have this limitation. The user can create RC
+ * QPs on a physical port up to the hardware limits independent of the
+ * Ethernet usage on the same port.
+ */
+ mutex_lock(&apc->vport_mutex);
+ if (apc->vport_use_count > 0) {
+ mutex_unlock(&apc->vport_mutex);
+ return -EBUSY;
+ }
+ apc->vport_use_count++;
+ mutex_unlock(&apc->vport_mutex);
+
mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
sizeof(req), sizeof(resp));
req.vport = apc->port_handle;
@@ -666,9 +714,16 @@ static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
apc->tx_shortform_allowed = resp.short_form_allowed;
apc->tx_vp_offset = resp.tx_vport_offset;
+
+ netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
+ apc->port_handle, protection_dom_id, doorbell_pg_id);
out:
+ if (err)
+ mana_uncfg_vport(apc);
+
return err;
}
+EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
static int mana_cfg_vport_steering(struct mana_port_context *apc,
enum TRI_STATE rx,
@@ -729,16 +784,19 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
resp.hdr.status);
err = -EPROTO;
}
+
+ netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
+ apc->port_handle, num_entries);
out:
kfree(req);
return err;
}
-static int mana_create_wq_obj(struct mana_port_context *apc,
- mana_handle_t vport,
- u32 wq_type, struct mana_obj_spec *wq_spec,
- struct mana_obj_spec *cq_spec,
- mana_handle_t *wq_obj)
+int mana_create_wq_obj(struct mana_port_context *apc,
+ mana_handle_t vport,
+ u32 wq_type, struct mana_obj_spec *wq_spec,
+ struct mana_obj_spec *cq_spec,
+ mana_handle_t *wq_obj)
{
struct mana_create_wqobj_resp resp = {};
struct mana_create_wqobj_req req = {};
@@ -787,9 +845,10 @@ static int mana_create_wq_obj(struct mana_port_context *apc,
out:
return err;
}
+EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
-static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
- mana_handle_t wq_obj)
+void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
+ mana_handle_t wq_obj)
{
struct mana_destroy_wqobj_resp resp = {};
struct mana_destroy_wqobj_req req = {};
@@ -814,6 +873,7 @@ static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
resp.hdr.status);
}
+EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
static void mana_destroy_eq(struct mana_context *ac)
{
@@ -1469,10 +1529,10 @@ static int mana_create_txq(struct mana_port_context *apc,
memset(&wq_spec, 0, sizeof(wq_spec));
memset(&cq_spec, 0, sizeof(cq_spec));
- wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+ wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
wq_spec.queue_size = txq->gdma_sq->queue_size;
- cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
cq_spec.queue_size = cq->gdma_cq->queue_size;
cq_spec.modr_ctx_id = 0;
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1487,8 +1547,10 @@ static int mana_create_txq(struct mana_port_context *apc,
txq->gdma_sq->id = wq_spec.queue_index;
cq->gdma_cq->id = cq_spec.queue_index;
- txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ txq->gdma_sq->mem_info.dma_region_handle =
+ GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.dma_region_handle =
+ GDMA_INVALID_DMA_REGION;
txq->gdma_txq_id = txq->gdma_sq->id;
@@ -1699,10 +1761,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
memset(&wq_spec, 0, sizeof(wq_spec));
memset(&cq_spec, 0, sizeof(cq_spec));
- wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+ wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
wq_spec.queue_size = rxq->gdma_rq->queue_size;
- cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
cq_spec.queue_size = cq->gdma_cq->queue_size;
cq_spec.modr_ctx_id = 0;
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1715,8 +1777,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->gdma_rq->id = wq_spec.queue_index;
cq->gdma_cq->id = cq_spec.queue_index;
- rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
rxq->gdma_id = rxq->gdma_rq->id;
cq->gdma_id = cq->gdma_cq->id;
@@ -1797,6 +1859,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
}
mana_destroy_txq(apc);
+ mana_uncfg_vport(apc);
if (gd->gdma_context->is_pf)
mana_pf_deregister_hw_vport(apc);
@@ -2069,12 +2132,16 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
+ mutex_init(&apc->vport_mutex);
+ apc->vport_use_count = 0;
+
ndev->netdev_ops = &mana_devops;
ndev->ethtool_ops = &mana_ethtool_ops;
ndev->mtu = ETH_DATA_LEN;
ndev->max_mtu = ndev->mtu;
ndev->min_mtu = ndev->mtu;
ndev->needed_headroom = MANA_HEADROOM;
+ ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
netif_carrier_off(ndev);
@@ -2112,6 +2179,69 @@ free_net:
return err;
}
+static void adev_release(struct device *dev)
+{
+ struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
+
+ kfree(madev);
+}
+
+static void remove_adev(struct gdma_dev *gd)
+{
+ struct auxiliary_device *adev = gd->adev;
+ int id = adev->id;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+
+ mana_adev_idx_free(id);
+ gd->adev = NULL;
+}
+
+static int add_adev(struct gdma_dev *gd)
+{
+ struct auxiliary_device *adev;
+ struct mana_adev *madev;
+ int ret;
+
+ madev = kzalloc(sizeof(*madev), GFP_KERNEL);
+ if (!madev)
+ return -ENOMEM;
+
+ adev = &madev->adev;
+ ret = mana_adev_idx_alloc();
+ if (ret < 0)
+ goto idx_fail;
+ adev->id = ret;
+
+ adev->name = "rdma";
+ adev->dev.parent = gd->gdma_context->dev;
+ adev->dev.release = adev_release;
+ madev->mdev = gd;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto init_fail;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto add_fail;
+
+ gd->adev = adev;
+ return 0;
+
+add_fail:
+ auxiliary_device_uninit(adev);
+
+init_fail:
+ mana_adev_idx_free(adev->id);
+
+idx_fail:
+ kfree(madev);
+
+ return ret;
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
@@ -2179,6 +2309,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
break;
}
}
+
+ err = add_adev(gd);
out:
if (err)
mana_remove(gd, false);
@@ -2195,6 +2327,10 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
int err;
int i;
+ /* adev currently doesn't support suspending, always remove it */
+ if (gd->adev)
+ remove_adev(gd);
+
for (i = 0; i < ac->num_ports; i++) {
ndev = ac->ports[i];
if (!ndev) {
@@ -2227,7 +2363,6 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
}
mana_destroy_eq(ac);
-
out:
mana_gd_deregister_device(gd);
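
The EXPORT_SYMBOL_NS(..., NET_MANA) exports and the "rdma" auxiliary device added above are intended to be consumed by a separate RDMA driver. A rough sketch of such a consumer follows; it is not part of this patch, the function and driver names are illustrative, and the "mana.rdma" match string assumes the registering module is mana.ko per the auxiliary bus "<module>.<device name>" convention:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>

static int mana_rdma_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mana_adev *madev = container_of(adev, struct mana_adev, adev);

	/* madev->mdev is the struct gdma_dev registered by the Ethernet
	 * driver; the NET_MANA exports (mana_cfg_vport(), mana_create_wq_obj(),
	 * ...) operate on the ports hanging off its gdma_context.
	 */
	dev_set_drvdata(&adev->dev, madev->mdev);
	return 0;
}

static void mana_rdma_remove(struct auxiliary_device *adev)
{
	dev_set_drvdata(&adev->dev, NULL);
}

static const struct auxiliary_device_id mana_rdma_id_table[] = {
	{ .name = "mana.rdma" },
	{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_rdma_id_table);

static struct auxiliary_driver mana_rdma_driver = {
	.name = "rdma_driver",
	.probe = mana_rdma_probe,
	.remove = mana_rdma_remove,
	.id_table = mana_rdma_id_table,
};
module_auxiliary_driver(mana_rdma_driver);

MODULE_IMPORT_NS(NET_MANA);
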
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c530db76880f..5b776a33a817 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -5,7 +5,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include "mana.h"
+#include <net/mana/mana.h>
static const struct {
char name[ETH_GSTRING_LEN];
@@ -90,13 +90,13 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
xdp_drop = rx_stats->xdp_drop;
xdp_tx = rx_stats->xdp_tx;
xdp_redirect = rx_stats->xdp_redirect;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
@@ -109,11 +109,11 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
xdp_xmit = tx_stats->xdp_xmit;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
index da255da62176..5553af9c8085 100644
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -6,7 +6,7 @@
#include <linux/io.h>
#include <linux/mm.h>
-#include "shm_channel.h"
+#include <net/mana/shm_channel.h>
#define PAGE_FRAME_L48_WIDTH_BYTES 6
#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.h b/drivers/net/ethernet/microsoft/mana/shm_channel.h
deleted file mode 100644
index 5199b41497ff..000000000000
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright (c) 2021, Microsoft Corporation. */
-
-#ifndef _SHM_CHANNEL_H
-#define _SHM_CHANNEL_H
-
-struct shm_channel {
- struct device *dev;
- void __iomem *base;
-};
-
-void mana_smc_init(struct shm_channel *sc, struct device *dev,
- void __iomem *base);
-
-int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
- u64 cq_addr, u64 rq_addr, u64 sq_addr,
- u32 eq_msix_index);
-
-int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf);
-
-#endif /* _SHM_CHANNEL_H */
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 13b14110a060..da56f9bfeaf0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -872,10 +872,8 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
return;
}
- /* Handle RX pause in all cases, with 2500base-X this is used for rate
- * adaptation.
- */
- mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
+ if (rx_pause)
+ mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
if (tx_pause)
mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 50858cc10fef..ca4bde861397 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -194,15 +194,6 @@ void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port)
devlink_port_unregister(dlp);
}
-static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->port.index;
-
- return &ocelot->devlink_ports[port];
-}
-
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
struct flow_cls_offload *f,
bool ingress)
@@ -925,7 +916,6 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_eth_ioctl = ocelot_ioctl,
- .ndo_get_devlink_port = ocelot_get_devlink_port,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -1737,7 +1727,6 @@ static void vsc7514_phylink_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ocelot_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_config = vsc7514_phylink_mac_config,
.mac_link_down = vsc7514_phylink_mac_link_down,
.mac_link_up = vsc7514_phylink_mac_link_up,
@@ -1873,6 +1862,7 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
if (ocelot->fdma)
ocelot_fdma_netdev_init(ocelot, dev);
+ SET_NETDEV_DEVLINK_PORT(dev, &ocelot->devlink_ports[port]);
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index dbd20b125cea..1478c3b21af1 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -9,6 +9,225 @@
#include <linux/workqueue.h>
#include "ocelot.h"
+enum ocelot_stat {
+ OCELOT_STAT_RX_OCTETS,
+ OCELOT_STAT_RX_UNICAST,
+ OCELOT_STAT_RX_MULTICAST,
+ OCELOT_STAT_RX_BROADCAST,
+ OCELOT_STAT_RX_SHORTS,
+ OCELOT_STAT_RX_FRAGMENTS,
+ OCELOT_STAT_RX_JABBERS,
+ OCELOT_STAT_RX_CRC_ALIGN_ERRS,
+ OCELOT_STAT_RX_SYM_ERRS,
+ OCELOT_STAT_RX_64,
+ OCELOT_STAT_RX_65_127,
+ OCELOT_STAT_RX_128_255,
+ OCELOT_STAT_RX_256_511,
+ OCELOT_STAT_RX_512_1023,
+ OCELOT_STAT_RX_1024_1526,
+ OCELOT_STAT_RX_1527_MAX,
+ OCELOT_STAT_RX_PAUSE,
+ OCELOT_STAT_RX_CONTROL,
+ OCELOT_STAT_RX_LONGS,
+ OCELOT_STAT_RX_CLASSIFIED_DROPS,
+ OCELOT_STAT_RX_RED_PRIO_0,
+ OCELOT_STAT_RX_RED_PRIO_1,
+ OCELOT_STAT_RX_RED_PRIO_2,
+ OCELOT_STAT_RX_RED_PRIO_3,
+ OCELOT_STAT_RX_RED_PRIO_4,
+ OCELOT_STAT_RX_RED_PRIO_5,
+ OCELOT_STAT_RX_RED_PRIO_6,
+ OCELOT_STAT_RX_RED_PRIO_7,
+ OCELOT_STAT_RX_YELLOW_PRIO_0,
+ OCELOT_STAT_RX_YELLOW_PRIO_1,
+ OCELOT_STAT_RX_YELLOW_PRIO_2,
+ OCELOT_STAT_RX_YELLOW_PRIO_3,
+ OCELOT_STAT_RX_YELLOW_PRIO_4,
+ OCELOT_STAT_RX_YELLOW_PRIO_5,
+ OCELOT_STAT_RX_YELLOW_PRIO_6,
+ OCELOT_STAT_RX_YELLOW_PRIO_7,
+ OCELOT_STAT_RX_GREEN_PRIO_0,
+ OCELOT_STAT_RX_GREEN_PRIO_1,
+ OCELOT_STAT_RX_GREEN_PRIO_2,
+ OCELOT_STAT_RX_GREEN_PRIO_3,
+ OCELOT_STAT_RX_GREEN_PRIO_4,
+ OCELOT_STAT_RX_GREEN_PRIO_5,
+ OCELOT_STAT_RX_GREEN_PRIO_6,
+ OCELOT_STAT_RX_GREEN_PRIO_7,
+ OCELOT_STAT_TX_OCTETS,
+ OCELOT_STAT_TX_UNICAST,
+ OCELOT_STAT_TX_MULTICAST,
+ OCELOT_STAT_TX_BROADCAST,
+ OCELOT_STAT_TX_COLLISION,
+ OCELOT_STAT_TX_DROPS,
+ OCELOT_STAT_TX_PAUSE,
+ OCELOT_STAT_TX_64,
+ OCELOT_STAT_TX_65_127,
+ OCELOT_STAT_TX_128_255,
+ OCELOT_STAT_TX_256_511,
+ OCELOT_STAT_TX_512_1023,
+ OCELOT_STAT_TX_1024_1526,
+ OCELOT_STAT_TX_1527_MAX,
+ OCELOT_STAT_TX_YELLOW_PRIO_0,
+ OCELOT_STAT_TX_YELLOW_PRIO_1,
+ OCELOT_STAT_TX_YELLOW_PRIO_2,
+ OCELOT_STAT_TX_YELLOW_PRIO_3,
+ OCELOT_STAT_TX_YELLOW_PRIO_4,
+ OCELOT_STAT_TX_YELLOW_PRIO_5,
+ OCELOT_STAT_TX_YELLOW_PRIO_6,
+ OCELOT_STAT_TX_YELLOW_PRIO_7,
+ OCELOT_STAT_TX_GREEN_PRIO_0,
+ OCELOT_STAT_TX_GREEN_PRIO_1,
+ OCELOT_STAT_TX_GREEN_PRIO_2,
+ OCELOT_STAT_TX_GREEN_PRIO_3,
+ OCELOT_STAT_TX_GREEN_PRIO_4,
+ OCELOT_STAT_TX_GREEN_PRIO_5,
+ OCELOT_STAT_TX_GREEN_PRIO_6,
+ OCELOT_STAT_TX_GREEN_PRIO_7,
+ OCELOT_STAT_TX_AGED,
+ OCELOT_STAT_DROP_LOCAL,
+ OCELOT_STAT_DROP_TAIL,
+ OCELOT_STAT_DROP_YELLOW_PRIO_0,
+ OCELOT_STAT_DROP_YELLOW_PRIO_1,
+ OCELOT_STAT_DROP_YELLOW_PRIO_2,
+ OCELOT_STAT_DROP_YELLOW_PRIO_3,
+ OCELOT_STAT_DROP_YELLOW_PRIO_4,
+ OCELOT_STAT_DROP_YELLOW_PRIO_5,
+ OCELOT_STAT_DROP_YELLOW_PRIO_6,
+ OCELOT_STAT_DROP_YELLOW_PRIO_7,
+ OCELOT_STAT_DROP_GREEN_PRIO_0,
+ OCELOT_STAT_DROP_GREEN_PRIO_1,
+ OCELOT_STAT_DROP_GREEN_PRIO_2,
+ OCELOT_STAT_DROP_GREEN_PRIO_3,
+ OCELOT_STAT_DROP_GREEN_PRIO_4,
+ OCELOT_STAT_DROP_GREEN_PRIO_5,
+ OCELOT_STAT_DROP_GREEN_PRIO_6,
+ OCELOT_STAT_DROP_GREEN_PRIO_7,
+ OCELOT_NUM_STATS,
+};
+
+struct ocelot_stat_layout {
+ u32 reg;
+ char name[ETH_GSTRING_LEN];
+};
+
+/* 32-bit counter checked for wraparound by ocelot_port_update_stats()
+ * and copied to ocelot->stats.
+ */
+#define OCELOT_STAT(kind) \
+ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind }
+/* Same as above, except also exported to ethtool -S. Standard counters should
+ * only be exposed through more specific interfaces rather than by their string name.
+ */
+#define OCELOT_STAT_ETHTOOL(kind, ethtool_name) \
+ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind, .name = ethtool_name }
+
+#define OCELOT_COMMON_STATS \
+ OCELOT_STAT_ETHTOOL(RX_OCTETS, "rx_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_UNICAST, "rx_unicast"), \
+ OCELOT_STAT_ETHTOOL(RX_MULTICAST, "rx_multicast"), \
+ OCELOT_STAT_ETHTOOL(RX_BROADCAST, "rx_broadcast"), \
+ OCELOT_STAT_ETHTOOL(RX_SHORTS, "rx_shorts"), \
+ OCELOT_STAT_ETHTOOL(RX_FRAGMENTS, "rx_fragments"), \
+ OCELOT_STAT_ETHTOOL(RX_JABBERS, "rx_jabbers"), \
+ OCELOT_STAT_ETHTOOL(RX_CRC_ALIGN_ERRS, "rx_crc_align_errs"), \
+ OCELOT_STAT_ETHTOOL(RX_SYM_ERRS, "rx_sym_errs"), \
+ OCELOT_STAT_ETHTOOL(RX_64, "rx_frames_below_65_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_65_127, "rx_frames_65_to_127_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_128_255, "rx_frames_128_to_255_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_256_511, "rx_frames_256_to_511_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_512_1023, "rx_frames_512_to_1023_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_1024_1526, "rx_frames_1024_to_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_1527_MAX, "rx_frames_over_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_PAUSE, "rx_pause"), \
+ OCELOT_STAT_ETHTOOL(RX_CONTROL, "rx_control"), \
+ OCELOT_STAT_ETHTOOL(RX_LONGS, "rx_longs"), \
+ OCELOT_STAT_ETHTOOL(RX_CLASSIFIED_DROPS, "rx_classified_drops"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_0, "rx_red_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_1, "rx_red_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_2, "rx_red_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_3, "rx_red_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_4, "rx_red_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_5, "rx_red_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_6, "rx_red_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_7, "rx_red_prio_7"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_0, "rx_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_1, "rx_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_2, "rx_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_3, "rx_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_4, "rx_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_5, "rx_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_6, "rx_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_7, "rx_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_0, "rx_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_1, "rx_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_2, "rx_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_3, "rx_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_4, "rx_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_5, "rx_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_6, "rx_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_7, "rx_green_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_OCTETS, "tx_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_UNICAST, "tx_unicast"), \
+ OCELOT_STAT_ETHTOOL(TX_MULTICAST, "tx_multicast"), \
+ OCELOT_STAT_ETHTOOL(TX_BROADCAST, "tx_broadcast"), \
+ OCELOT_STAT_ETHTOOL(TX_COLLISION, "tx_collision"), \
+ OCELOT_STAT_ETHTOOL(TX_DROPS, "tx_drops"), \
+ OCELOT_STAT_ETHTOOL(TX_PAUSE, "tx_pause"), \
+ OCELOT_STAT_ETHTOOL(TX_64, "tx_frames_below_65_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_65_127, "tx_frames_65_to_127_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_128_255, "tx_frames_128_255_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_256_511, "tx_frames_256_511_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_512_1023, "tx_frames_512_1023_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_1024_1526, "tx_frames_1024_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_1527_MAX, "tx_frames_over_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_0, "tx_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_1, "tx_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_2, "tx_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_3, "tx_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_4, "tx_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_5, "tx_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_6, "tx_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_7, "tx_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_0, "tx_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_1, "tx_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_2, "tx_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_3, "tx_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_4, "tx_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_5, "tx_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_6, "tx_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_7, "tx_green_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_AGED, "tx_aged"), \
+ OCELOT_STAT_ETHTOOL(DROP_LOCAL, "drop_local"), \
+ OCELOT_STAT_ETHTOOL(DROP_TAIL, "drop_tail"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_0, "drop_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_1, "drop_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_2, "drop_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_3, "drop_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_4, "drop_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_5, "drop_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_6, "drop_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_7, "drop_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_0, "drop_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_1, "drop_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_2, "drop_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_3, "drop_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_4, "drop_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_5, "drop_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_6, "drop_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_7, "drop_green_prio_7")
+
+struct ocelot_stats_region {
+ struct list_head node;
+ u32 base;
+ int count;
+ u32 *buf;
+};
+
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
+};
+
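
For readability, note that OCELOT_STAT() and OCELOT_STAT_ETHTOOL() above are plain designated initializers keyed by enum ocelot_stat; an illustrative expansion of one entry (not additional patch content):

/* OCELOT_STAT_ETHTOOL(RX_OCTETS, "rx_octets") expands to */
[OCELOT_STAT_RX_OCTETS] = { .reg = SYS_COUNT_RX_OCTETS, .name = "rx_octets" },

/* whereas OCELOT_STAT(RX_OCTETS) would leave .name empty, so the counter is
 * still bulk-read from hardware but skipped by ethtool -S (see the
 * name[0] == '\0' checks below).
 */
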
/* Read the counters from hardware and keep them in region->buf.
* Caller must hold &ocelot->stat_view_lock.
*/
@@ -93,10 +312,10 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
return;
for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (ocelot->stats_layout[i].name[0] == '\0')
+ if (ocelot_stats_layout[i].name[0] == '\0')
continue;
- memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+ memcpy(data + i * ETH_GSTRING_LEN, ocelot_stats_layout[i].name,
ETH_GSTRING_LEN);
}
}
@@ -137,7 +356,7 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
return -EOPNOTSUPP;
for (i = 0; i < OCELOT_NUM_STATS; i++)
- if (ocelot->stats_layout[i].name[0] != '\0')
+ if (ocelot_stats_layout[i].name[0] != '\0')
num_stats++;
return num_stats;
@@ -154,7 +373,7 @@ static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
for (i = 0; i < OCELOT_NUM_STATS; i++) {
int index = port * OCELOT_NUM_STATS + i;
- if (ocelot->stats_layout[i].name[0] == '\0')
+ if (ocelot_stats_layout[i].name[0] == '\0')
continue;
*data++ = ocelot->stats[index];
@@ -383,16 +602,16 @@ EXPORT_SYMBOL(ocelot_port_get_stats64);
static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
{
struct ocelot_stats_region *region = NULL;
- unsigned int last;
+ unsigned int last = 0;
int i;
INIT_LIST_HEAD(&ocelot->stats_regions);
for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (!ocelot->stats_layout[i].reg)
+ if (!ocelot_stats_layout[i].reg)
continue;
- if (region && ocelot->stats_layout[i].reg == last + 4) {
+ if (region && ocelot_stats_layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -400,12 +619,18 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!region)
return -ENOMEM;
- region->base = ocelot->stats_layout[i].reg;
+ /* enum ocelot_stat must be kept sorted in the same
+ * order as ocelot_stats_layout[i].reg in order to have
+ * efficient bulking
+ */
+ WARN_ON(last >= ocelot_stats_layout[i].reg);
+
+ region->base = ocelot_stats_layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot->stats_layout[i].reg;
+ last = ocelot_stats_layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
@@ -456,3 +681,4 @@ void ocelot_stats_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
}
+
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 6f22aea08a64..b097fd4a4061 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -100,10 +100,6 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
- OCELOT_COMMON_STATS,
-};
-
static void ocelot_pll5_init(struct ocelot *ocelot)
{
/* Configure PLL5. This will need a proper CCF driver
@@ -138,7 +134,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
int ret;
ocelot->map = ocelot_regmap;
- ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
@@ -377,9 +372,6 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
return -ENOMEM;
for_each_available_child_of_node(ports, portnp) {
- struct ocelot_port_private *priv;
- struct ocelot_port *ocelot_port;
- struct devlink_port *dlp;
struct regmap *target;
struct resource *res;
char res_name[8];
@@ -420,12 +412,6 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
}
devlink_ports_registered |= BIT(port);
-
- ocelot_port = ocelot->ports[port];
- priv = container_of(ocelot_port, struct ocelot_port_private,
- port);
- dlp = &ocelot->devlink_ports[port];
- devlink_port_type_eth_set(dlp, priv->dev);
}
/* Initialize unused devlink ports at the end */
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 8844d1ac053a..e785c00b5845 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -54,6 +54,17 @@ config NFP_APP_ABM_NIC
functionality.
Code will be built into the nfp.ko driver.
+config NFP_NET_IPSEC
+ bool "NFP IPsec crypto offload support"
+ depends on NFP
+ depends on XFRM_OFFLOAD
+ default y
+ help
+ Enable driver support for IPsec crypto offload on NFP NICs.
+ Say Y if you are planning to make use of IPsec crypto
+ offload. Note that IPsec crypto offload on the NFP NIC
+ requires specific firmware to work.
+
config NFP_DEBUG
bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
depends on NFP
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 9c0861d03634..8a250214e289 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -80,4 +80,6 @@ nfp-objs += \
abm/main.o
endif
+nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o
+
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
index bffe58bb2f27..1df73d658938 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -39,4 +39,27 @@ nfp_net_tls_rx_resync_req(struct net_device *netdev,
}
#endif
+/* IPsec related structures and functions */
+struct nfp_ipsec_offload {
+ u32 seq_hi;
+ u32 seq_low;
+ u32 handle;
+};
+
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline void nfp_net_ipsec_init(struct nfp_net *nn)
+{
+}
+
+static inline void nfp_net_ipsec_clean(struct nfp_net *nn)
+{
+}
+#else
+void nfp_net_ipsec_init(struct nfp_net *nn);
+void nfp_net_ipsec_clean(struct nfp_net *nn);
+bool nfp_net_ipsec_tx_prep(struct nfp_net_dp *dp, struct sk_buff *skb,
+ struct nfp_ipsec_offload *offload_info);
+int nfp_net_ipsec_rx(struct nfp_meta_parsed *meta, struct sk_buff *skb);
+#endif
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
new file mode 100644
index 000000000000..3728870d8e9c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <asm/unaligned.h>
+#include <linux/ktime.h>
+#include <net/xfrm.h>
+
+#include "../nfp_net_ctrl.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+
+#define NFP_NET_IPSEC_MAX_SA_CNT (16 * 1024) /* Firmware supports a maximum of 16K offloaded SAs */
+
+/* IPsec config message cmd codes */
+enum nfp_ipsec_cfg_mssg_cmd_codes {
+ NFP_IPSEC_CFG_MSSG_ADD_SA, /* Add a new SA */
+ NFP_IPSEC_CFG_MSSG_INV_SA /* Invalidate an existing SA */
+};
+
+/* IPsec config message response codes */
+enum nfp_ipsec_cfg_mssg_rsp_codes {
+ NFP_IPSEC_CFG_MSSG_OK,
+ NFP_IPSEC_CFG_MSSG_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_VALID,
+ NFP_IPSEC_CFG_MSSG_SA_HASH_ADD_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_HASH_DEL_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_INVALID_CMD
+};
+
+/* Protocol */
+enum nfp_ipsec_sa_prot {
+ NFP_IPSEC_PROTOCOL_AH = 0,
+ NFP_IPSEC_PROTOCOL_ESP = 1
+};
+
+/* Mode */
+enum nfp_ipsec_sa_mode {
+ NFP_IPSEC_PROTMODE_TRANSPORT = 0,
+ NFP_IPSEC_PROTMODE_TUNNEL = 1
+};
+
+/* Cipher types */
+enum nfp_ipsec_sa_cipher {
+ NFP_IPSEC_CIPHER_NULL,
+ NFP_IPSEC_CIPHER_3DES,
+ NFP_IPSEC_CIPHER_AES128,
+ NFP_IPSEC_CIPHER_AES192,
+ NFP_IPSEC_CIPHER_AES256,
+ NFP_IPSEC_CIPHER_AES128_NULL,
+ NFP_IPSEC_CIPHER_AES192_NULL,
+ NFP_IPSEC_CIPHER_AES256_NULL,
+ NFP_IPSEC_CIPHER_CHACHA20
+};
+
+/* Cipher modes */
+enum nfp_ipsec_sa_cipher_mode {
+ NFP_IPSEC_CIMODE_ECB,
+ NFP_IPSEC_CIMODE_CBC,
+ NFP_IPSEC_CIMODE_CFB,
+ NFP_IPSEC_CIMODE_OFB,
+ NFP_IPSEC_CIMODE_CTR
+};
+
+/* Hash types */
+enum nfp_ipsec_sa_hash_type {
+ NFP_IPSEC_HASH_NONE,
+ NFP_IPSEC_HASH_MD5_96,
+ NFP_IPSEC_HASH_SHA1_96,
+ NFP_IPSEC_HASH_SHA256_96,
+ NFP_IPSEC_HASH_SHA384_96,
+ NFP_IPSEC_HASH_SHA512_96,
+ NFP_IPSEC_HASH_MD5_128,
+ NFP_IPSEC_HASH_SHA1_80,
+ NFP_IPSEC_HASH_SHA256_128,
+ NFP_IPSEC_HASH_SHA384_192,
+ NFP_IPSEC_HASH_SHA512_256,
+ NFP_IPSEC_HASH_GF128_128,
+ NFP_IPSEC_HASH_POLY1305_128
+};
+
+/* IPSEC_CFG_MSSG_ADD_SA */
+struct nfp_ipsec_cfg_add_sa {
+ u32 ciph_key[8]; /* Cipher Key */
+ union {
+ u32 auth_key[16]; /* Authentication Key */
+ struct nfp_ipsec_aesgcm { /* AES-GCM-ESP fields */
+ u32 salt; /* Initialized with SA */
+ u32 resv[15];
+ } aesgcm_fields;
+ };
+ struct sa_ctrl_word {
+ uint32_t hash :4; /* From nfp_ipsec_sa_hash_type */
+ uint32_t cimode :4; /* From nfp_ipsec_sa_cipher_mode */
+ uint32_t cipher :4; /* From nfp_ipsec_sa_cipher */
+ uint32_t mode :2; /* From nfp_ipsec_sa_mode */
+ uint32_t proto :2; /* From nfp_ipsec_sa_prot */
+ uint32_t dir :1; /* SA direction */
+ uint32_t resv0 :12;
+ uint32_t encap_dsbl:1; /* Encap/Decap disable */
+ uint32_t resv1 :2; /* Must be set to 0 */
+ } ctrl_word;
+ u32 spi; /* SPI Value */
+ uint32_t pmtu_limit :16; /* PMTU Limit */
+ uint32_t resv0 :5;
+ uint32_t ipv6 :1; /* Outbound IPv6 addr format */
+ uint32_t resv1 :10;
+ u32 resv2[2];
+ u32 src_ip[4]; /* Src IP addr */
+ u32 dst_ip[4]; /* Dst IP addr */
+ u32 resv3[6];
+};
+
+/* IPSEC_CFG_MSSG */
+struct nfp_ipsec_cfg_mssg {
+ union {
+ struct {
+ uint32_t cmd:16; /* One of nfp_ipsec_cfg_mssg_cmd_codes */
+ uint32_t rsp:16; /* One of nfp_ipsec_cfg_mssg_rsp_codes */
+ uint32_t sa_idx:16; /* SA table index */
+ uint32_t spare0:16;
+ struct nfp_ipsec_cfg_add_sa cfg_add_sa;
+ };
+ u32 raw[64];
+ };
+};
+
+static int nfp_ipsec_cfg_cmd_issue(struct nfp_net *nn, int type, int saidx,
+ struct nfp_ipsec_cfg_mssg *msg)
+{
+ int i, msg_size, ret;
+
+ msg->cmd = type;
+ msg->sa_idx = saidx;
+ msg->rsp = 0;
+ msg_size = ARRAY_SIZE(msg->raw);
+
+ for (i = 0; i < msg_size; i++)
+ nn_writel(nn, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
+
+ ret = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_IPSEC);
+ if (ret < 0)
+ return ret;
+
+ /* For now we always read the whole message response back */
+ for (i = 0; i < msg_size; i++)
+ msg->raw[i] = nn_readl(nn, NFP_NET_CFG_MBOX_VAL + 4 * i);
+
+ switch (msg->rsp) {
+ case NFP_IPSEC_CFG_MSSG_OK:
+ return 0;
+ case NFP_IPSEC_CFG_MSSG_SA_INVALID_CMD:
+ return -EINVAL;
+ case NFP_IPSEC_CFG_MSSG_SA_VALID:
+ return -EEXIST;
+ case NFP_IPSEC_CFG_MSSG_FAILED:
+ case NFP_IPSEC_CFG_MSSG_SA_HASH_ADD_FAILED:
+ case NFP_IPSEC_CFG_MSSG_SA_HASH_DEL_FAILED:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int set_aes_keylen(struct nfp_ipsec_cfg_add_sa *cfg, int alg, int keylen)
+{
+ bool aes_gmac = (alg == SADB_X_EALG_NULL_AES_GMAC);
+
+ switch (keylen) {
+ case 128:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES128_NULL :
+ NFP_IPSEC_CIPHER_AES128;
+ break;
+ case 192:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES192_NULL :
+ NFP_IPSEC_CIPHER_AES192;
+ break;
+ case 256:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES256_NULL :
+ NFP_IPSEC_CIPHER_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void set_md5hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_96;
+ break;
+ case 128:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_128;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha1hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_96;
+ break;
+ case 80:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_80;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_256hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_96;
+ break;
+ case 128:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_128;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_384hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_96;
+ break;
+ case 192:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_192;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_96;
+ break;
+ case 256:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_256;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static int nfp_net_xfrm_add_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct nfp_ipsec_cfg_mssg msg = {};
+ int i, key_len, trunc_len, err = 0;
+ struct nfp_ipsec_cfg_add_sa *cfg;
+ struct nfp_net *nn;
+ unsigned int saidx;
+
+ nn = netdev_priv(netdev);
+ cfg = &msg.cfg_add_sa;
+
+ /* General */
+ switch (x->props.mode) {
+ case XFRM_MODE_TUNNEL:
+ cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TUNNEL;
+ break;
+ case XFRM_MODE_TRANSPORT:
+ cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TRANSPORT;
+ break;
+ default:
+ nn_err(nn, "Unsupported mode for xfrm offload\n");
+ return -EINVAL;
+ }
+
+ switch (x->id.proto) {
+ case IPPROTO_ESP:
+ cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_ESP;
+ break;
+ case IPPROTO_AH:
+ cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH;
+ break;
+ default:
+ nn_err(nn, "Unsupported protocol for xfrm offload\n");
+ return -EINVAL;
+ }
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ nn_err(nn, "Unsupported XFRM_REPLAY_MODE_ESN for xfrm offload\n");
+ return -EINVAL;
+ }
+
+ cfg->spi = ntohl(x->id.spi);
+
+ /* Hash/Authentication */
+ if (x->aalg)
+ trunc_len = x->aalg->alg_trunc_len;
+ else
+ trunc_len = 0;
+
+ switch (x->props.aalgo) {
+ case SADB_AALG_NONE:
+ if (x->aead) {
+ trunc_len = -1;
+ } else {
+ nn_err(nn, "Unsupported authentication algorithm\n");
+ return -EINVAL;
+ }
+ break;
+ case SADB_X_AALG_NULL:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_NONE;
+ trunc_len = -1;
+ break;
+ case SADB_AALG_MD5HMAC:
+ set_md5hmac(cfg, &trunc_len);
+ break;
+ case SADB_AALG_SHA1HMAC:
+ set_sha1hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_256HMAC:
+ set_sha2_256hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_384HMAC:
+ set_sha2_384hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_512HMAC:
+ set_sha2_512hmac(cfg, &trunc_len);
+ break;
+ default:
+ nn_err(nn, "Unsupported authentication algorithm\n");
+ return -EINVAL;
+ }
+
+ if (!trunc_len) {
+ nn_err(nn, "Unsupported authentication algorithm trunc length\n");
+ return -EINVAL;
+ }
+
+ if (x->aalg) {
+ key_len = DIV_ROUND_UP(x->aalg->alg_key_len, BITS_PER_BYTE);
+ if (key_len > sizeof(cfg->auth_key)) {
+ nn_err(nn, "Insufficient space for offloaded auth key\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < key_len / sizeof(cfg->auth_key[0]); i++)
+ cfg->auth_key[i] = get_unaligned_be32(x->aalg->alg_key +
+ sizeof(cfg->auth_key[0]) * i);
+ }
+
+ /* Encryption */
+ switch (x->props.ealgo) {
+ case SADB_EALG_NONE:
+ case SADB_EALG_NULL:
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL;
+ break;
+ case SADB_EALG_3DESCBC:
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES;
+ break;
+ case SADB_X_EALG_AES_GCM_ICV16:
+ case SADB_X_EALG_NULL_AES_GMAC:
+ if (!x->aead) {
+ nn_err(nn, "Invalid AES key data\n");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ nn_err(nn, "ICV must be 128bit with SADB_X_EALG_AES_GCM_ICV16\n");
+ return -EINVAL;
+ }
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CTR;
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_GF128_128;
+
+ /* Aead->alg_key_len includes 32-bit salt */
+ if (set_aes_keylen(cfg, x->props.ealgo, x->aead->alg_key_len - 32)) {
+ nn_err(nn, "Unsupported AES key length %d\n", x->aead->alg_key_len);
+ return -EINVAL;
+ }
+ break;
+ case SADB_X_EALG_AESCBC:
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ if (!x->ealg) {
+ nn_err(nn, "Invalid AES key data\n");
+ return -EINVAL;
+ }
+ if (set_aes_keylen(cfg, x->props.ealgo, x->ealg->alg_key_len) < 0) {
+ nn_err(nn, "Unsupported AES key length %d\n", x->ealg->alg_key_len);
+ return -EINVAL;
+ }
+ break;
+ default:
+ nn_err(nn, "Unsupported encryption algorithm for offload\n");
+ return -EINVAL;
+ }
+
+ if (x->aead) {
+ int salt_len = 4;
+
+ key_len = DIV_ROUND_UP(x->aead->alg_key_len, BITS_PER_BYTE);
+ key_len -= salt_len;
+
+ if (key_len > sizeof(cfg->ciph_key)) {
+ nn_err(nn, "aead: Insufficient space for offloaded key\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]); i++)
+ cfg->ciph_key[i] = get_unaligned_be32(x->aead->alg_key +
+ sizeof(cfg->ciph_key[0]) * i);
+
+ /* Load up the salt */
+ cfg->aesgcm_fields.salt = get_unaligned_be32(x->aead->alg_key + key_len);
+ }
+
+ if (x->ealg) {
+ key_len = DIV_ROUND_UP(x->ealg->alg_key_len, BITS_PER_BYTE);
+
+ if (key_len > sizeof(cfg->ciph_key)) {
+ nn_err(nn, "ealg: Insufficient space for offloaded key\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]); i++)
+ cfg->ciph_key[i] = get_unaligned_be32(x->ealg->alg_key +
+ sizeof(cfg->ciph_key[0]) * i);
+ }
+
+ /* IP related info */
+ switch (x->props.family) {
+ case AF_INET:
+ cfg->ipv6 = 0;
+ cfg->src_ip[0] = ntohl(x->props.saddr.a4);
+ cfg->dst_ip[0] = ntohl(x->id.daddr.a4);
+ break;
+ case AF_INET6:
+ cfg->ipv6 = 1;
+ for (i = 0; i < 4; i++) {
+ cfg->src_ip[i] = ntohl(x->props.saddr.a6[i]);
+ cfg->dst_ip[i] = ntohl(x->id.daddr.a6[i]);
+ }
+ break;
+ default:
+ nn_err(nn, "Unsupported address family\n");
+ return -EINVAL;
+ }
+
+ /* Maximum PMTU the NIC IPsec code can handle; other limits may apply. */
+ cfg->pmtu_limit = 0xffff;
+ cfg->ctrl_word.encap_dsbl = 1;
+
+ /* SA direction */
+ cfg->ctrl_word.dir = x->xso.dir;
+
+ /* Find an unused SA data entry */
+ err = xa_alloc(&nn->xa_ipsec, &saidx, x,
+ XA_LIMIT(0, NFP_NET_IPSEC_MAX_SA_CNT - 1), GFP_KERNEL);
+ if (err < 0) {
+ nn_err(nn, "Unable to get sa_data number for IPsec\n");
+ return err;
+ }
+
+ /* Allocate saidx and commit the SA */
+ err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_ADD_SA, saidx, &msg);
+ if (err) {
+ xa_erase(&nn->xa_ipsec, saidx);
+ nn_err(nn, "Failed to issue IPsec command err ret=%d\n", err);
+ return err;
+ }
+
+ /* 0 is an invalid offload_handle for the kernel */
+ x->xso.offload_handle = saidx + 1;
+ return 0;
+}
+
+static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct nfp_ipsec_cfg_mssg msg;
+ struct nfp_net *nn;
+ int err;
+
+ nn = netdev_priv(netdev);
+ err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_INV_SA,
+ x->xso.offload_handle - 1, &msg);
+ if (err)
+ nn_warn(nn, "Failed to invalidate SA in hardware\n");
+
+ xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
+}
+
+static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET)
+ /* Offload with IPv4 options is not supported yet */
+ return ip_hdr(skb)->ihl == 5;
+
+ /* Offload with IPv6 extension headers is not supported yet */
+ return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
+}
+
+static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = nfp_net_xfrm_add_state,
+ .xdo_dev_state_delete = nfp_net_xfrm_del_state,
+ .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
+};
+
+void nfp_net_ipsec_init(struct nfp_net *nn)
+{
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC))
+ return;
+
+ xa_init_flags(&nn->xa_ipsec, XA_FLAGS_ALLOC);
+ nn->dp.netdev->xfrmdev_ops = &nfp_net_ipsec_xfrmdev_ops;
+}
+
+void nfp_net_ipsec_clean(struct nfp_net *nn)
+{
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC))
+ return;
+
+ WARN_ON(!xa_empty(&nn->xa_ipsec));
+ xa_destroy(&nn->xa_ipsec);
+}
+
+bool nfp_net_ipsec_tx_prep(struct nfp_net_dp *dp, struct sk_buff *skb,
+ struct nfp_ipsec_offload *offload_info)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct xfrm_state *x;
+
+ x = xfrm_input_state(skb);
+ if (!x)
+ return false;
+
+ offload_info->seq_hi = xo->seq.hi;
+ offload_info->seq_low = xo->seq.low;
+ offload_info->handle = x->xso.offload_handle;
+
+ return true;
+}
+
+int nfp_net_ipsec_rx(struct nfp_meta_parsed *meta, struct sk_buff *skb)
+{
+ struct net_device *netdev = skb->dev;
+ struct xfrm_offload *xo;
+ struct xfrm_state *x;
+ struct sec_path *sp;
+ struct nfp_net *nn;
+ u32 saidx;
+
+ nn = netdev_priv(netdev);
+
+ saidx = meta->ipsec_saidx - 1;
+ if (saidx >= NFP_NET_IPSEC_MAX_SA_CNT)
+ return -EINVAL;
+
+ sp = secpath_set(skb);
+ if (unlikely(!sp))
+ return -ENOMEM;
+
+ xa_lock(&nn->xa_ipsec);
+ x = xa_load(&nn->xa_ipsec, saidx);
+ xa_unlock(&nn->xa_ipsec);
+ if (!x)
+ return -EINVAL;
+
+ xfrm_state_hold(x);
+ sp->xvec[sp->len++] = x;
+ sp->olen++;
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = CRYPTO_SUCCESS;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index e92860e20a24..88d6d992e7d0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -154,10 +154,11 @@ nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
return NULL;
}
-int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
- struct net_device *master,
- struct nfp_fl_pre_lag *pre_act,
- struct netlink_ext_ack *extack)
+static int nfp_fl_lag_get_group_info(struct nfp_app *app,
+ struct net_device *netdev,
+ __be16 *group_id,
+ u8 *batch_ver,
+ u8 *group_inst)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group = NULL;
@@ -165,23 +166,52 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
mutex_lock(&priv->nfp_lag.lock);
group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
- master);
+ netdev);
if (!group) {
mutex_unlock(&priv->nfp_lag.lock);
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
return -ENOENT;
}
- pre_act->group_id = cpu_to_be16(group->group_id);
- temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
- NFP_FL_PRE_LAG_VER_OFF);
- memcpy(pre_act->lag_version, &temp_vers, 3);
- pre_act->instance = group->group_inst;
+ if (group_id)
+ *group_id = cpu_to_be16(group->group_id);
+
+ if (batch_ver) {
+ temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
+ NFP_FL_PRE_LAG_VER_OFF);
+ memcpy(batch_ver, &temp_vers, 3);
+ }
+
+ if (group_inst)
+ *group_inst = group->group_inst;
+
mutex_unlock(&priv->nfp_lag.lock);
return 0;
}
+int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
+ struct net_device *master,
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack)
+{
+ if (nfp_fl_lag_get_group_info(app, master, &pre_act->group_id,
+ pre_act->lag_version,
+ &pre_act->instance)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
+ struct net_device *netdev,
+ struct nfp_tun_neigh_lag *lag)
+{
+ nfp_fl_lag_get_group_info(app, netdev, NULL,
+ lag->lag_version, &lag->lag_instance);
+}
+
int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
struct nfp_flower_priv *priv = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 4d960a9641b3..83eaa5ae3cd4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -76,7 +76,9 @@ nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev)
{
+ struct nfp_flower_priv *priv = app->priv;
int ext_port;
+ int gid;
if (nfp_netdev_is_nfp_repr(netdev)) {
return nfp_repr_get_port_id(netdev);
@@ -86,6 +88,13 @@ u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
return 0;
return nfp_flower_internal_port_get_port_id(ext_port);
+ } else if (netif_is_lag_master(netdev) &&
+ priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) {
+ gid = nfp_flower_lag_get_output_id(app, netdev);
+ if (gid < 0)
+ return 0;
+
+ return (NFP_FL_LAG_OUT | gid);
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index cb799d18682d..40372545148e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -52,6 +52,7 @@ struct nfp_app;
#define NFP_FL_FEATS_QOS_PPS BIT(9)
#define NFP_FL_FEATS_QOS_METER BIT(10)
#define NFP_FL_FEATS_DECAP_V2 BIT(11)
+#define NFP_FL_FEATS_TUNNEL_NEIGH_LAG BIT(12)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -69,7 +70,8 @@ struct nfp_app;
NFP_FL_FEATS_VLAN_QINQ | \
NFP_FL_FEATS_QOS_PPS | \
NFP_FL_FEATS_QOS_METER | \
- NFP_FL_FEATS_DECAP_V2)
+ NFP_FL_FEATS_DECAP_V2 | \
+ NFP_FL_FEATS_TUNNEL_NEIGH_LAG)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -104,6 +106,16 @@ struct nfp_fl_tunnel_offloads {
};
/**
+ * struct nfp_tun_neigh_lag - lag info
+ * @lag_version: lag version
+ * @lag_instance: lag instance
+ */
+struct nfp_tun_neigh_lag {
+ u8 lag_version[3];
+ u8 lag_instance;
+};
+
+/**
* struct nfp_tun_neigh - basic neighbour data
* @dst_addr: Destination MAC address
* @src_addr: Source MAC address
@@ -133,12 +145,14 @@ struct nfp_tun_neigh_ext {
* @src_ipv4: Source IPv4 address
* @common: Neighbour/route common info
* @ext: Neighbour/route extended info
+ * @lag: lag port info
*/
struct nfp_tun_neigh_v4 {
__be32 dst_ipv4;
__be32 src_ipv4;
struct nfp_tun_neigh common;
struct nfp_tun_neigh_ext ext;
+ struct nfp_tun_neigh_lag lag;
};
/**
@@ -147,12 +161,14 @@ struct nfp_tun_neigh_v4 {
* @src_ipv6: Source IPv6 address
* @common: Neighbour/route common info
* @ext: Neighbour/route extended info
+ * @lag: lag port info
*/
struct nfp_tun_neigh_v6 {
struct in6_addr dst_ipv6;
struct in6_addr src_ipv6;
struct nfp_tun_neigh common;
struct nfp_tun_neigh_ext ext;
+ struct nfp_tun_neigh_lag lag;
};
/**
@@ -647,6 +663,9 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct netlink_ext_ack *extack);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
+ struct net_device *netdev,
+ struct nfp_tun_neigh_lag *lag);
void nfp_flower_qos_init(struct nfp_app *app);
void nfp_flower_qos_cleanup(struct nfp_app *app);
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 52f67157bd0f..a8678d5612ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -290,6 +290,11 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
plen -= sizeof(struct nfp_tun_neigh_ext);
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) &&
+ (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+ mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
+ plen -= sizeof(struct nfp_tun_neigh_lag);
+
skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
if (!skb)
return -ENOMEM;
@@ -468,6 +473,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_table_params);
if (!nn_entry && !neigh_invalid) {
struct nfp_tun_neigh_ext *ext;
+ struct nfp_tun_neigh_lag *lag;
struct nfp_tun_neigh *common;
nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
@@ -488,6 +494,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload->dst_ipv6 = flowi6->daddr;
common = &payload->common;
ext = &payload->ext;
+ lag = &payload->lag;
mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
} else {
struct flowi4 *flowi4 = (struct flowi4 *)flow;
@@ -498,6 +505,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload->dst_ipv4 = flowi4->daddr;
common = &payload->common;
ext = &payload->ext;
+ lag = &payload->lag;
mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
}
ext->host_ctx = cpu_to_be32(U32_MAX);
@@ -505,6 +513,9 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
ext->vlan_tci = cpu_to_be16(U16_MAX);
ether_addr_copy(common->src_addr, netdev->dev_addr);
neigh_ha_snapshot(common->dst_addr, neigh, netdev);
+
+ if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
+ nfp_flower_lag_get_info_from_netdev(app, netdev, lag);
common->port_id = cpu_to_be32(port_id);
if (rhashtable_insert_fast(&priv->neigh_table,
@@ -547,13 +558,38 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (nn_entry->flow)
list_del(&nn_entry->list_head);
kfree(nn_entry);
- } else if (nn_entry && !neigh_invalid && override) {
- mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
- NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
- nfp_tun_link_predt_entries(app, nn_entry);
- nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
- nn_entry->payload,
- GFP_ATOMIC);
+ } else if (nn_entry && !neigh_invalid) {
+ struct nfp_tun_neigh *common;
+ u8 dst_addr[ETH_ALEN];
+ bool is_mac_change;
+
+ if (is_ipv6) {
+ struct nfp_tun_neigh_v6 *payload;
+
+ payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
+ common = &payload->common;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
+ } else {
+ struct nfp_tun_neigh_v4 *payload;
+
+ payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
+ common = &payload->common;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ }
+
+ ether_addr_copy(dst_addr, common->dst_addr);
+ neigh_ha_snapshot(common->dst_addr, neigh, netdev);
+ is_mac_change = !ether_addr_equal(dst_addr, common->dst_addr);
+ if (override || is_mac_change) {
+ if (is_mac_change && nn_entry->flow) {
+ list_del(&nn_entry->list_head);
+ nn_entry->flow = NULL;
+ }
+ nfp_tun_link_predt_entries(app, nn_entry);
+ nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
+ }
}
spin_unlock_bh(&priv->predt_lock);
@@ -593,8 +629,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
- if (!nfp_netdev_is_nfp_repr(n->dev) &&
- !nfp_flower_internal_port_can_offload(app, n->dev))
+ if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
return NOTIFY_DONE;
#if IS_ENABLED(CONFIG_INET)
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 448c1c1afaee..861082c5dbff 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -4,6 +4,7 @@
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
+#include <net/xfrm.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -167,28 +168,34 @@ nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync);
}
-static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64 tls_handle)
+static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
+ u64 tls_handle, bool *ipsec)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct nfp_ipsec_offload offload_info;
unsigned char *data;
bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
- if (unlikely(md_dst || tls_handle)) {
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
- md_dst = NULL;
- }
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (xfrm_offload(skb))
+ *ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
+#endif
+
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
- if (!(md_dst || tls_handle || vlan_insert))
+ if (!(md_dst || tls_handle || vlan_insert || *ipsec))
return 0;
md_bytes = sizeof(meta_id) +
!!md_dst * NFP_NET_META_PORTID_SIZE +
!!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE;
+ vlan_insert * NFP_NET_META_VLAN_SIZE +
+ *ipsec * NFP_NET_META_IPSEC_FIELD_SIZE; /* IPsec has 12 bytes of metadata */
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -218,6 +225,19 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64
meta_id <<= NFP_NET_META_FIELD_SIZE;
meta_id |= NFP_NET_META_VLAN;
}
+ if (*ipsec) {
+ /* IPsec uses three consecutive 4-bit metadata types, one per field,
+ * so in total it prepends three 4-byte values (12 bytes) of metadata.
+ */
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_hi, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_low, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.handle - 1, data);
+ meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
+ meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
+ }
data -= sizeof(meta_id);
put_unaligned_be32(meta_id, data);
@@ -246,6 +266,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t dma_addr;
unsigned int fsize;
u64 tls_handle = 0;
+ bool ipsec = false;
u16 qidx;
dp = &nn->dp;
@@ -273,7 +294,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle);
+ md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle, &ipsec);
if (unlikely(md_bytes < 0))
goto err_flush;
@@ -312,6 +333,8 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
+ if (ipsec)
+ nfp_nfd3_ipsec_tx(txd, skb);
/* Gather DMA */
if (nr_frags > 0) {
__le64 second_half;
@@ -764,6 +787,15 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
+#ifdef CONFIG_NFP_NET_IPSEC
+ case NFP_NET_META_IPSEC:
+ /* Note: an IPsec packet may carry a zero SA index, so add 1
+ * here so the driver can tell IPsec packets from non-IPsec ones.
+ */
+ meta->ipsec_saidx = get_unaligned_be32(data) + 1;
+ data += 4;
+ break;
+#endif
default:
return true;
}
@@ -876,12 +908,11 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
+ int idx, pkts_polled = 0;
bool xdp_tx_cmpl = false;
unsigned int true_bufsz;
struct sk_buff *skb;
- int pkts_polled = 0;
struct xdp_buff xdp;
- int idx;
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
@@ -1081,6 +1112,13 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+#endif
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
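On top of the hunks above: the TX path shifts three NFP_NET_META_IPSEC nibbles into meta_id and writes seq_hi, seq_low and handle - 1 as three 4-byte big-endian fields in front of the packet, while the RX parser stores saidx + 1 so that zero can mean "no IPsec". A standalone sketch of just the IPsec part of the TX packing (in the driver, meta_id already holds the other fields and is shifted left by 12 bits first; 0x9 stands in for NFP_NET_META_IPSEC):

#include <stdint.h>

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

/* Pack the three IPsec metadata fields plus the type word in front of
 * 'end' (which would be the start of the packet data) and return the
 * new start of the prepended metadata.
 */
static uint8_t *pack_ipsec_meta(uint8_t *end, uint32_t seq_hi,
				uint32_t seq_low, uint32_t handle)
{
	uint32_t meta_id;
	uint8_t *data = end;

	data -= 4;
	put_be32(data, seq_hi);
	data -= 4;
	put_be32(data, seq_low);
	data -= 4;
	put_be32(data, handle - 1);

	meta_id = 0x9 << 8 | 0x9 << 4 | 0x9;
	data -= 4;
	put_be32(data, meta_id);
	return data;
}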
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
new file mode 100644
index 000000000000..e90f8c975903
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <net/xfrm.h>
+
+#include "../nfp_net.h"
+#include "nfd3.h"
+
+void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
+ txd->flags |= NFD3_DESC_TX_CSUM | NFD3_DESC_TX_IP4_CSUM |
+ NFD3_DESC_TX_TCP_CSUM | NFD3_DESC_TX_UDP_CSUM;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
index 7a0df9e6c3c4..9c1c10dcbaee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
@@ -103,4 +103,12 @@ void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
+{
+}
+#else
+void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb);
+#endif
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index dd56207df246..90707346a4ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -445,6 +445,4 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id);
-struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev);
-
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index cb08d7bf9524..bf6bae557158 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -239,10 +239,6 @@ nfp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
char *buf = NULL;
int err;
- err = devlink_info_driver_name_put(req, "nfp");
- if (err)
- return err;
-
vendor = nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor");
part = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
sn = nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial");
@@ -334,6 +330,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
int serial_len;
int ret;
+ SET_NETDEV_DEVLINK_PORT(port->netdev, &port->dl_port);
+
rtnl_lock();
ret = nfp_devlink_fill_eth_port(port, &eth_port);
rtnl_unlock();
@@ -360,24 +358,3 @@ void nfp_devlink_port_unregister(struct nfp_port *port)
{
devl_port_unregister(&port->dl_port);
}
-
-void nfp_devlink_port_type_eth_set(struct nfp_port *port)
-{
- devlink_port_type_eth_set(&port->dl_port, port->netdev);
-}
-
-void nfp_devlink_port_type_clear(struct nfp_port *port)
-{
- devlink_port_type_clear(&port->dl_port);
-}
-
-struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev)
-{
- struct nfp_port *port;
-
- port = nfp_port_from_netdev(netdev);
- if (!port)
- return NULL;
-
- return &port->dl_port;
-}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index afd3edfa2428..14a751bfe1fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -12,7 +12,6 @@
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/types.h>
-#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <net/devlink.h>
@@ -28,6 +27,7 @@ struct nfp_hwinfo;
struct nfp_mip;
struct nfp_net;
struct nfp_nsp_identify;
+struct nfp_eth_media_buf;
struct nfp_port;
struct nfp_rtsym;
struct nfp_rtsym_table;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index a101ff30a1ae..da33f09facb9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -88,6 +88,9 @@
#define NFP_NET_FL_BATCH 16 /* Add freelist in this Batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048 /* XDP bufs to reclaim in NAPI poll */
+/* MC definitions */
+#define NFP_NET_CFG_MAC_MC_MAX 1024 /* Maximum number of MC addresses per port */
+
/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
@@ -263,6 +266,10 @@ struct nfp_meta_parsed {
u8 tpid;
u16 tci;
} vlan;
+
+#ifdef CONFIG_NFP_NET_IPSEC
+ u32 ipsec_saidx;
+#endif
};
struct nfp_net_rx_hash {
@@ -472,6 +479,7 @@ struct nfp_stat_pair {
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
* @ctrl: Local copy of the control register/word.
+ * @ctrl_w1: Local copy of the control register/word1.
* @fl_bufsz: Currently configured size of the freelist buffers
* @xdp_prog: Installed XDP program
* @tx_rings: Array of pre-allocated TX ring structures
@@ -504,6 +512,7 @@ struct nfp_net_dp {
u32 rx_dma_off;
u32 ctrl;
+ u32 ctrl_w1;
u32 fl_bufsz;
struct bpf_prog *xdp_prog;
@@ -541,6 +550,7 @@ struct nfp_net_dp {
* @id: vNIC id within the PF (0 for VFs)
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
+ * @cap_w1: Extended capabilities word advertised by the Firmware
* @max_mtu: Maximum support MTU advertised by the Firmware
* @rss_hfunc: RSS selected hash function
* @rss_cfg: RSS configuration
@@ -583,6 +593,7 @@ struct nfp_net_dp {
* @qcp_cfg: Pointer to QCP queue used for configuration notification
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
+ * @xa_ipsec: IPsec xarray SA data
* @tlv_caps: Parsed TLV capabilities
* @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
* @ktls_rx_conn_cnt: Number of offloaded kTLS RX connections
@@ -617,6 +628,7 @@ struct nfp_net {
u32 id;
u32 cap;
+ u32 cap_w1;
u32 max_mtu;
u8 rss_hfunc;
@@ -670,6 +682,10 @@ struct nfp_net {
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
+#ifdef CONFIG_NFP_NET_IPSEC
+ struct xarray xa_ipsec;
+#endif
+
struct nfp_net_tlv_caps tlv_caps;
unsigned int ktls_tx_conn_cnt;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 27f4786ace4f..2314cf55e821 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -27,7 +27,6 @@
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
-#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
@@ -735,8 +734,9 @@ static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
*/
static void nfp_net_vecs_init(struct nfp_net *nn)
{
+ int numa_node = dev_to_node(&nn->pdev->dev);
struct nfp_net_r_vector *r_vec;
- int r;
+ unsigned int r;
nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn;
@@ -762,7 +762,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
tasklet_disable(&r_vec->tasklet);
}
- cpumask_set_cpu(r, &r_vec->affinity_mask);
+ cpumask_set_cpu(cpumask_local_spread(r, numa_node), &r_vec->affinity_mask);
}
}
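The vector setup above now feeds the ring index through cpumask_local_spread() so the affinity hints prefer CPUs on the device's NUMA node. A rough userspace-style model of that selection (assumed semantics: node-local CPUs first, then the remaining CPUs, wrapping on the total count; the arrays are hypothetical stand-ins for the kernel cpumasks):

#include <stddef.h>

/* Pick the idx-th CPU, walking the preferred node's CPUs first and all
 * other CPUs afterwards, wrapping modulo the total number of CPUs.
 */
static int local_spread(unsigned int idx,
			const int *node_cpus, size_t n_node,
			const int *other_cpus, size_t n_other)
{
	idx %= (unsigned int)(n_node + n_other);
	return idx < n_node ? node_cpus[idx] : other_cpus[idx - n_node];
}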
@@ -1007,6 +1007,7 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, nn->dp.ctrl_w1);
err = nfp_net_reconfig(nn, update);
if (err) {
nfp_net_clear_config_and_disable(nn);
@@ -1333,18 +1334,59 @@ err_unlock:
return err;
}
+static int nfp_net_mc_cfg(struct net_device *netdev, const unsigned char *addr, const u32 cmd)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int ret;
+
+ ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
+ if (ret)
+ return ret;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
+ get_unaligned_be32(addr));
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
+ get_unaligned_be16(addr + 4));
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+}
+
+static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
+ nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
+ netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
+ return -EINVAL;
+ }
+
+ return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+}
+
+static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
+}
+
static void nfp_net_set_rx_mode(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- u32 new_ctrl;
+ u32 new_ctrl, new_ctrl_w1;
new_ctrl = nn->dp.ctrl;
+ new_ctrl_w1 = nn->dp.ctrl_w1;
if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
else
new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+ if (netdev->flags & IFF_ALLMULTI)
+ new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_MCAST_FILTER;
+ else
+ new_ctrl_w1 |= nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER;
+
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -1354,13 +1396,21 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
}
- if (new_ctrl == nn->dp.ctrl)
+ if ((nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) &&
+ __dev_mc_sync(netdev, nfp_net_mc_sync, nfp_net_mc_unsync))
+ netdev_err(netdev, "Sync mc address failed\n");
+
+ if (new_ctrl == nn->dp.ctrl && new_ctrl_w1 == nn->dp.ctrl_w1)
return;
- nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ if (new_ctrl != nn->dp.ctrl)
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ if (new_ctrl_w1 != nn->dp.ctrl_w1)
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
nn->dp.ctrl = new_ctrl;
+ nn->dp.ctrl_w1 = new_ctrl_w1;
}
static void nfp_net_rss_init_itbl(struct nfp_net *nn)
@@ -1631,21 +1681,21 @@ static void nfp_net_stat64(struct net_device *netdev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
@@ -2013,7 +2063,6 @@ const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_xsk_wakeup = nfp_net_xsk_wakeup,
- .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
.ndo_bridge_getlink = nfp_net_bridge_getlink,
.ndo_bridge_setlink = nfp_net_bridge_setlink,
};
@@ -2044,7 +2093,6 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
- .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
.ndo_bridge_getlink = nfp_net_bridge_getlink,
.ndo_bridge_setlink = nfp_net_bridge_setlink,
};
@@ -2094,7 +2142,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2122,6 +2170,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
"RXCSUM_COMPLETE " : "",
nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
+ nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
nfp_app_extra_cap(nn->app, nn));
}
@@ -2373,6 +2422,12 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
}
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH;
+
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC)
+ netdev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
+#endif
+
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -2454,6 +2509,7 @@ static int nfp_net_read_caps(struct nfp_net *nn)
{
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
+ nn->cap_w1 = nn_readl(nn, NFP_NET_CFG_CAP_WORD1);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
/* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
@@ -2543,6 +2599,9 @@ int nfp_net_init(struct nfp_net *nn)
if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
+ nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER;
+
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -2550,6 +2609,7 @@ int nfp_net_init(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, 0);
nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, 0);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
NFP_NET_CFG_UPDATE_GEN);
if (err)
@@ -2565,6 +2625,8 @@ int nfp_net_init(struct nfp_net *nn)
err = nfp_net_tls_init(nn);
if (err)
goto err_clean_mbox;
+
+ nfp_net_ipsec_init(nn);
}
nfp_net_vecs_init(nn);
@@ -2588,6 +2650,7 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
+ nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 6714d5e8fdab..51124309ae1f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -48,6 +48,7 @@
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
#define NFP_NET_META_RESYNC_INFO 8 /* RX resync info request */
+#define NFP_NET_META_IPSEC 9 /* IPsec SA index for tx and rx */
#define NFP_META_PORT_ID_CTRL ~0U
@@ -55,6 +56,8 @@
#define NFP_NET_META_VLAN_SIZE 4
#define NFP_NET_META_PORTID_SIZE 4
#define NFP_NET_META_CONN_HANDLE_SIZE 8
+#define NFP_NET_META_IPSEC_SIZE 4
+#define NFP_NET_META_IPSEC_FIELD_SIZE 12
/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
@@ -257,10 +260,20 @@
#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
-/* 40B reserved for future use (0x0098 - 0x00c0)
+/* 3 words reserved for extended ctrl words (0x0098 - 0x00a4)
+ * 3 words reserved for extended cap words (0x00a4 - 0x00b0)
+ * Currently only one word of each is used; more can be added in future.
*/
-#define NFP_NET_CFG_RESERVED 0x0098
-#define NFP_NET_CFG_RESERVED_SZ 0x0028
+#define NFP_NET_CFG_CTRL_WORD1 0x0098
+#define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */
+#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
+#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
+
+#define NFP_NET_CFG_CAP_WORD1 0x00a4
+
+/* 16B reserved for future use (0x00b0 - 0x00c0) */
+#define NFP_NET_CFG_RESERVED 0x00b0
+#define NFP_NET_CFG_RESERVED_SZ 0x0010
/* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
@@ -390,17 +403,20 @@
*/
#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
-
+#define NFP_NET_CFG_MBOX_VAL 0x1808
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
-
+#define NFP_NET_CFG_MBOX_CMD_IPSEC 3
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
+#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
+#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
+
/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
@@ -412,6 +428,17 @@
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+/* Multicast filtering using general use mailbox
+ * %NFP_NET_CFG_MULTICAST: Base address of Multicast filter mailbox
+ * %NFP_NET_CFG_MULTICAST_MAC_HI: High 32 bits of the Multicast MAC address
+ * %NFP_NET_CFG_MULTICAST_MAC_LO: Low 16 bits of the Multicast MAC address
+ * %NFP_NET_CFG_MULTICAST_SZ: Size of the Multicast filter mailbox in bytes
+ */
+#define NFP_NET_CFG_MULTICAST NFP_NET_CFG_MBOX_SIMPLE_VAL
+#define NFP_NET_CFG_MULTICAST_MAC_HI NFP_NET_CFG_MULTICAST
+#define NFP_NET_CFG_MULTICAST_MAC_LO (NFP_NET_CFG_MULTICAST + 6)
+#define NFP_NET_CFG_MULTICAST_SZ 0x0006
+
/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
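The multicast mailbox defined above carries one address per command: nfp_net_mc_cfg() writes the first four bytes of the MAC into NFP_NET_CFG_MULTICAST_MAC_HI and the last two into NFP_NET_CFG_MULTICAST_MAC_LO via get_unaligned_be32()/get_unaligned_be16(). A minimal standalone sketch of that split:

#include <stdint.h>

/* Split a 6-byte MAC the way nfp_net_mc_cfg() feeds the mailbox:
 * bytes 0-3 become the 32-bit "HI" value, bytes 4-5 the 16-bit "LO"
 * value, both in big-endian (network) order.
 */
static void mc_addr_split(const uint8_t mac[6], uint32_t *hi, uint16_t *lo)
{
	*hi = (uint32_t)mac[0] << 24 | (uint32_t)mac[1] << 16 |
	      (uint32_t)mac[2] << 8  | mac[3];
	*lo = (uint16_t)(mac[4] << 8 | mac[5]);
}

For example, 01:00:5e:00:00:fb becomes HI 0x01005e00 and LO 0x00fb.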
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 991059d6cb32..a4a89ef3f18b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -293,6 +293,76 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
}
}
+static const u16 nfp_eth_media_table[] = {
+ [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+};
+
+static void nfp_add_media_link_mode(struct nfp_port *port,
+ struct nfp_eth_table_port *eth_port,
+ struct ethtool_link_ksettings *cmd)
+{
+ u64 supported_modes[2], advertised_modes[2];
+ struct nfp_eth_media_buf ethm = {
+ .eth_index = eth_port->eth_index,
+ };
+ struct nfp_cpp *cpp = port->app->cpp;
+
+ if (nfp_eth_read_media(cpp, &ethm))
+ return;
+
+ for (u32 i = 0; i < 2; i++) {
+ supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
+ advertised_modes[i] = le64_to_cpu(ethm.advertised_modes[i]);
+ }
+
+ for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
+ if (i < 64) {
+ if (supported_modes[0] & BIT_ULL(i))
+ __set_bit(nfp_eth_media_table[i],
+ cmd->link_modes.supported);
+
+ if (advertised_modes[0] & BIT_ULL(i))
+ __set_bit(nfp_eth_media_table[i],
+ cmd->link_modes.advertising);
+ } else {
+ if (supported_modes[1] & BIT_ULL(i - 64))
+ __set_bit(nfp_eth_media_table[i],
+ cmd->link_modes.supported);
+
+ if (advertised_modes[1] & BIT_ULL(i - 64))
+ __set_bit(nfp_eth_media_table[i],
+ cmd->link_modes.advertising);
+ }
+ }
+}
+
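nfp_add_media_link_mode() above treats the firmware-reported modes as a 128-bit bitmap split across two 64-bit words: mode index i maps to word i / 64, bit i % 64, and is then translated through nfp_eth_media_table into an ethtool link-mode bit. A minimal sketch of that indexing:

#include <stdbool.h>
#include <stdint.h>

/* Check whether media mode 'i' is set in a two-word (128-bit) bitmap,
 * using the same word/bit split as nfp_add_media_link_mode().
 */
static bool media_mode_set(const uint64_t words[2], unsigned int i)
{
	return (words[i / 64] >> (i % 64)) & 1;
}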
/**
* nfp_net_get_link_ksettings - Get Link Speed settings
* @netdev: network interface device structure
@@ -311,6 +381,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
u16 sts;
/* Init to unknowns */
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
@@ -321,6 +393,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
if (eth_port) {
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ nfp_add_media_link_mode(port, eth_port, cmd);
if (eth_port->supp_aneg) {
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
if (eth_port->aneg == NFP_ANEG_AUTO) {
@@ -686,7 +759,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -694,10 +767,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -707,7 +780,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3bae92dc899e..abfe788d558f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -16,7 +16,6 @@
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
-#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>
@@ -156,22 +155,17 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
nfp_net_debugfs_vnic_add(nn, pf->ddir);
- if (nn->port)
- nfp_devlink_port_type_eth_set(nn->port);
-
nfp_net_info(nn);
if (nfp_net_is_data_vnic(nn)) {
err = nfp_app_vnic_init(pf->app, nn);
if (err)
- goto err_devlink_port_type_clean;
+ goto err_debugfs_vnic_clean;
}
return 0;
-err_devlink_port_type_clean:
- if (nn->port)
- nfp_devlink_port_type_clear(nn->port);
+err_debugfs_vnic_clean:
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
err_devlink_port_clean:
@@ -220,8 +214,6 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
if (nfp_net_is_data_vnic(nn))
nfp_app_vnic_clean(pf->app, nn);
- if (nn->port)
- nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
if (nn->port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 8b77582bdfa0..3af1229a3f08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -134,13 +134,13 @@ nfp_repr_get_host_stats64(const struct net_device *netdev,
repr_stats = per_cpu_ptr(repr->stats, i);
do {
- start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
+ start = u64_stats_fetch_begin(&repr_stats->syncp);
tbytes = repr_stats->tx_bytes;
tpkts = repr_stats->tx_packets;
tdrops = repr_stats->tx_drops;
rbytes = repr_stats->rx_bytes;
rpkts = repr_stats->rx_packets;
- } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;
@@ -275,7 +275,6 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
void
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 6793cdf9ff11..f8cd157ca1d7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -129,8 +129,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
-void nfp_devlink_port_type_eth_set(struct nfp_port *port);
-void nfp_devlink_port_type_clear(struct nfp_port *port);
/* Mac stats (0x0000 - 0x0200)
* all counters are 64bit.
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 730fea214b8a..7136bc48530b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -100,6 +100,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOADED = 19, /* Is application firmware loaded */
SPCODE_VERSIONS = 21, /* Report FW versions */
SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */
+ SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */
};
struct nfp_nsp_dma_buf {
@@ -1100,4 +1101,20 @@ int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
kfree(buf);
return ret;
+}
+
+int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg media = {
+ {
+ .code = SPCODE_READ_MEDIA,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &media);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 992d72ac98d3..8f5cab0032d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -65,6 +65,11 @@ static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
return nfp_nsp_get_abi_ver_minor(state) > 28;
}
+static inline bool nfp_nsp_has_read_media(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 33;
+}
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
@@ -97,6 +102,47 @@ enum nfp_eth_fec {
NFP_FEC_DISABLED_BIT,
};
+/* RJ45 link modes are not used, so there is no mapping for them */
+enum nfp_ethtool_link_mode_list {
+ NFP_MEDIA_W0_RJ45_10M,
+ NFP_MEDIA_W0_RJ45_10M_HD,
+ NFP_MEDIA_W0_RJ45_100M,
+ NFP_MEDIA_W0_RJ45_100M_HD,
+ NFP_MEDIA_W0_RJ45_1G,
+ NFP_MEDIA_W0_RJ45_2P5G,
+ NFP_MEDIA_W0_RJ45_5G,
+ NFP_MEDIA_W0_RJ45_10G,
+ NFP_MEDIA_1000BASE_CX,
+ NFP_MEDIA_1000BASE_KX,
+ NFP_MEDIA_10GBASE_KX4,
+ NFP_MEDIA_10GBASE_KR,
+ NFP_MEDIA_10GBASE_CX4,
+ NFP_MEDIA_10GBASE_CR,
+ NFP_MEDIA_10GBASE_SR,
+ NFP_MEDIA_10GBASE_ER,
+ NFP_MEDIA_25GBASE_KR,
+ NFP_MEDIA_25GBASE_KR_S,
+ NFP_MEDIA_25GBASE_CR,
+ NFP_MEDIA_25GBASE_CR_S,
+ NFP_MEDIA_25GBASE_SR,
+ NFP_MEDIA_40GBASE_CR4,
+ NFP_MEDIA_40GBASE_KR4,
+ NFP_MEDIA_40GBASE_SR4,
+ NFP_MEDIA_40GBASE_LR4,
+ NFP_MEDIA_50GBASE_KR,
+ NFP_MEDIA_50GBASE_SR,
+ NFP_MEDIA_50GBASE_CR,
+ NFP_MEDIA_50GBASE_LR,
+ NFP_MEDIA_50GBASE_ER,
+ NFP_MEDIA_50GBASE_FR,
+ NFP_MEDIA_100GBASE_KR4,
+ NFP_MEDIA_100GBASE_SR4,
+ NFP_MEDIA_100GBASE_CR4,
+ NFP_MEDIA_100GBASE_KP4,
+ NFP_MEDIA_100GBASE_CR10,
+ NFP_MEDIA_LINK_MODES_NUMBER
+};
+
#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
@@ -256,6 +302,16 @@ enum nfp_nsp_sensor_id {
int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
long *val);
+struct nfp_eth_media_buf {
+ u8 eth_index;
+ u8 reserved[7];
+ __le64 supported_modes[2];
+ __le64 advertised_modes[2];
+};
+
+int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm);
+
#define NFP_NSP_VERSION_BUFSZ 1024 /* reasonable size, not in the ABI */
enum nfp_nsp_versions {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index bb64efec4c46..570ac1bb2122 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -647,3 +647,29 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
lanes, NSP_ETH_CTRL_SET_LANES);
}
+
+int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm)
+{
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_nsp_open(cpp);
+ if (IS_ERR(nsp)) {
+ nfp_err(cpp, "Failed to access the NSP: %pe\n", nsp);
+ return PTR_ERR(nsp);
+ }
+
+ if (!nfp_nsp_has_read_media(nsp)) {
+ nfp_warn(cpp, "Reading media link modes not supported. Please update flash\n");
+ ret = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ ret = nfp_nsp_read_media(nsp, ethm, sizeof(*ethm));
+ if (ret)
+ nfp_err(cpp, "Reading media link modes failed: %pe\n", ERR_PTR(ret));
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return ret;
+}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index daa028729d44..0605d1ee490d 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1734,12 +1734,12 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
u64 tx_packets, tx_bytes, tx_dropped;
do {
- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+ syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
rx_packets = src->stat_rx_packets;
rx_bytes = src->stat_rx_bytes;
rx_dropped = src->stat_rx_dropped;
rx_missed_errors = src->stat_rx_missed_errors;
- } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+ } while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start));
storage->rx_packets += rx_packets;
storage->rx_bytes += rx_bytes;
@@ -1747,11 +1747,11 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
storage->rx_missed_errors += rx_missed_errors;
do {
- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+ syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp);
tx_packets = src->stat_tx_packets;
tx_bytes = src->stat_tx_bytes;
tx_dropped = src->stat_tx_dropped;
- } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+ } while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start));
storage->tx_packets += tx_packets;
storage->tx_bytes += tx_bytes;
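The statistics changes throughout this series replace the _irq variants of the u64_stats helpers with the plain ones; the read side remains the same seqcount-style snapshot loop that retries if a writer raced with it. A rough standalone model of that retry pattern (illustrative only; the kernel helpers handle the real memory-ordering and per-architecture details):

#include <stdatomic.h>
#include <stdint.h>

struct stats {
	atomic_uint seq;	/* even: stable, odd: writer in progress */
	uint64_t packets;
	uint64_t bytes;
};

/* Reader side: retry the snapshot until the sequence number is even and
 * unchanged across the copy, the same idea as u64_stats_fetch_begin() /
 * u64_stats_fetch_retry().
 */
static void stats_read(struct stats *s, uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*packets = s->packets;
		*bytes = s->bytes;
		atomic_thread_fence(memory_order_acquire);
	} while ((start & 1) ||
		 atomic_load_explicit(&s->seq, memory_order_relaxed) != start);
}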
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 9d0514cfeb5c..626b9113e7c4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -481,6 +481,20 @@ int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
return err;
}
+void ionic_vf_start(struct ionic *ionic)
+{
+ union ionic_dev_cmd cmd = {
+ .vf_ctrl.opcode = IONIC_CMD_VF_CTRL,
+ .vf_ctrl.ctrl_opcode = IONIC_VF_CTRL_START_ALL,
+ };
+
+ if (!(ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_VF_CTRL)))
+ return;
+
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+}
+
/* LIF commands */
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
u16 lif_type, u8 qtype, u8 qver)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 563c302eb033..2a1d7b9c07e7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -124,6 +124,8 @@ static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+static_assert(sizeof(struct ionic_vf_ctrl_cmd) == 64);
+static_assert(sizeof(struct ionic_vf_ctrl_comp) == 16);
#endif /* __CHECKER__ */
struct ionic_devinfo {
@@ -324,6 +326,7 @@ int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
struct ionic_vf_getattr_comp *comp);
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
u16 lif_type, u8 qtype, u8 qver);
+void ionic_vf_start(struct ionic *ionic);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index 4297ed9024c0..e6ff757895ab 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -26,10 +26,6 @@ static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
char buf[16];
int err = 0;
- err = devlink_info_driver_name_put(req, IONIC_DRV_NAME);
- if (err)
- return err;
-
err = devlink_info_version_running_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW,
idev->dev_info.fw_version);
@@ -90,7 +86,7 @@ int ionic_devlink_register(struct ionic *ionic)
return err;
}
- devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
+ SET_NETDEV_DEVLINK_PORT(ionic->lif->netdev, &ionic->dl_port);
devlink_register(dl);
return 0;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 4a90f611c611..eac09b2375b8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -8,7 +8,7 @@
#define IONIC_DEV_INFO_VERSION 1
#define IONIC_IFNAMSIZ 16
-/**
+/*
* enum ionic_cmd_opcode - Device commands
*/
enum ionic_cmd_opcode {
@@ -54,6 +54,7 @@ enum ionic_cmd_opcode {
/* SR/IOV commands */
IONIC_CMD_VF_GETATTR = 60,
IONIC_CMD_VF_SETATTR = 61,
+ IONIC_CMD_VF_CTRL = 62,
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
@@ -200,6 +201,7 @@ struct ionic_dev_reset_comp {
};
#define IONIC_IDENTITY_VERSION_1 1
+#define IONIC_DEV_IDENTITY_VERSION_2 2
/**
* struct ionic_dev_identify_cmd - Driver/device identify command
@@ -254,6 +256,14 @@ union ionic_drv_identity {
};
/**
+ * enum ionic_dev_capability - Device capabilities
+ * @IONIC_DEV_CAP_VF_CTRL: Device supports VF ctrl operations
+ */
+enum ionic_dev_capability {
+ IONIC_DEV_CAP_VF_CTRL = BIT(0),
+};
+
+/**
* union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
@@ -273,6 +283,7 @@ union ionic_drv_identity {
* @hwstamp_mask: Bitmask for subtraction of hardware tick values.
* @hwstamp_mult: Hardware tick to nanosecond multiplier.
* @hwstamp_shift: Hardware tick to nanosecond divisor (power of two).
+ * @capabilities: Device capabilities
*/
union ionic_dev_identity {
struct {
@@ -290,6 +301,7 @@ union ionic_dev_identity {
__le64 hwstamp_mask;
__le32 hwstamp_mult;
__le32 hwstamp_shift;
+ __le64 capabilities;
};
__le32 words[478];
};
@@ -2044,6 +2056,35 @@ struct ionic_vf_getattr_comp {
u8 color;
};
+enum ionic_vf_ctrl_opcode {
+ IONIC_VF_CTRL_START_ALL = 0,
+ IONIC_VF_CTRL_START = 1,
+};
+
+/**
+ * struct ionic_vf_ctrl_cmd - VF control command
+ * @opcode: Opcode for the command
+ * @vf_index: VF index; unused when the START_ALL operation is used.
+ * @ctrl_opcode: VF control operation type
+ */
+struct ionic_vf_ctrl_cmd {
+ u8 opcode;
+ u8 ctrl_opcode;
+ __le16 vf_index;
+ /* private: */
+ u8 rsvd1[60];
+};
+
+/**
+ * struct ionic_vf_ctrl_comp - VF_CTRL command completion.
+ * @status: Status of the command (enum ionic_status_code)
+ */
+struct ionic_vf_ctrl_comp {
+ u8 status;
+ /* private: */
+ u8 rsvd[15];
+};
+
/**
* struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
@@ -2865,6 +2906,7 @@ union ionic_dev_cmd {
struct ionic_vf_setattr_cmd vf_setattr;
struct ionic_vf_getattr_cmd vf_getattr;
+ struct ionic_vf_ctrl_cmd vf_ctrl;
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
@@ -2903,6 +2945,7 @@ union ionic_dev_cmd_comp {
struct ionic_vf_setattr_comp vf_setattr;
struct ionic_vf_getattr_comp vf_getattr;
+ struct ionic_vf_ctrl_comp vf_ctrl;
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 19d4848df17d..4dd16c487f2b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1491,7 +1491,13 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
NETIF_F_RXCSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_TSO_ECN;
+ NETIF_F_TSO_ECN |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
if (lif->nxqs > 1)
features |= NETIF_F_RXHASH;
@@ -2220,7 +2226,7 @@ static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd
}
}
-static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
+static int ionic_get_fw_vf_config(struct ionic *ionic, int vf, struct ionic_vf *vfdata)
{
struct ionic_vf_getattr_comp comp = { 0 };
int err;
@@ -2231,14 +2237,14 @@ static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
if (err && comp.status != IONIC_RC_ENOSUPP)
goto err_out;
if (!err)
- ionic->vfs[vf].vlanid = comp.vlanid;
+ vfdata->vlanid = comp.vlanid;
attr = IONIC_VF_ATTR_SPOOFCHK;
err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
if (err && comp.status != IONIC_RC_ENOSUPP)
goto err_out;
if (!err)
- ionic->vfs[vf].spoofchk = comp.spoofchk;
+ vfdata->spoofchk = comp.spoofchk;
attr = IONIC_VF_ATTR_LINKSTATE;
err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
@@ -2247,13 +2253,13 @@ static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
if (!err) {
switch (comp.linkstate) {
case IONIC_VF_LINK_STATUS_UP:
- ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ vfdata->linkstate = IFLA_VF_LINK_STATE_ENABLE;
break;
case IONIC_VF_LINK_STATUS_DOWN:
- ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ vfdata->linkstate = IFLA_VF_LINK_STATE_DISABLE;
break;
case IONIC_VF_LINK_STATUS_AUTO:
- ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_AUTO;
+ vfdata->linkstate = IFLA_VF_LINK_STATE_AUTO;
break;
default:
dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
@@ -2266,21 +2272,21 @@ static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
if (err && comp.status != IONIC_RC_ENOSUPP)
goto err_out;
if (!err)
- ionic->vfs[vf].maxrate = comp.maxrate;
+ vfdata->maxrate = comp.maxrate;
attr = IONIC_VF_ATTR_TRUST;
err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
if (err && comp.status != IONIC_RC_ENOSUPP)
goto err_out;
if (!err)
- ionic->vfs[vf].trusted = comp.trust;
+ vfdata->trusted = comp.trust;
attr = IONIC_VF_ATTR_MAC;
err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
if (err && comp.status != IONIC_RC_ENOSUPP)
goto err_out;
if (!err)
- ether_addr_copy(ionic->vfs[vf].macaddr, comp.macaddr);
+ ether_addr_copy(vfdata->macaddr, comp.macaddr);
err_out:
if (err)
@@ -2295,6 +2301,7 @@ static int ionic_get_vf_config(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
+ struct ionic_vf vfdata = { 0 };
int ret = 0;
if (!netif_device_present(netdev))
@@ -2308,14 +2315,14 @@ static int ionic_get_vf_config(struct net_device *netdev,
ivf->vf = vf;
ivf->qos = 0;
- ret = ionic_update_cached_vf_config(ionic, vf);
+ ret = ionic_get_fw_vf_config(ionic, vf, &vfdata);
if (!ret) {
- ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
- ivf->spoofchk = ionic->vfs[vf].spoofchk;
- ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
- ivf->trusted = ionic->vfs[vf].trusted;
- ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ ivf->vlan = le16_to_cpu(vfdata.vlanid);
+ ivf->spoofchk = vfdata.spoofchk;
+ ivf->linkstate = vfdata.linkstate;
+ ivf->max_tx_rate = le32_to_cpu(vfdata.maxrate);
+ ivf->trusted = vfdata.trusted;
+ ether_addr_copy(ivf->mac, vfdata.macaddr);
}
}
@@ -2562,6 +2569,76 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
return ret;
}
+static void ionic_vf_attr_replay(struct ionic_lif *lif)
+{
+ struct ionic_vf_setattr_cmd vfc = { };
+ struct ionic *ionic = lif->ionic;
+ struct ionic_vf *v;
+ int i;
+
+ if (!ionic->vfs)
+ return;
+
+ down_read(&ionic->vf_op_lock);
+
+ for (i = 0; i < ionic->num_vfs; i++) {
+ v = &ionic->vfs[i];
+
+ if (v->stats_pa) {
+ vfc.attr = IONIC_VF_ATTR_STATSADDR;
+ vfc.stats_pa = cpu_to_le64(v->stats_pa);
+ ionic_set_vf_config(ionic, i, &vfc);
+ vfc.stats_pa = 0;
+ }
+
+ if (!is_zero_ether_addr(v->macaddr)) {
+ vfc.attr = IONIC_VF_ATTR_MAC;
+ ether_addr_copy(vfc.macaddr, v->macaddr);
+ ionic_set_vf_config(ionic, i, &vfc);
+ eth_zero_addr(vfc.macaddr);
+ }
+
+ if (v->vlanid) {
+ vfc.attr = IONIC_VF_ATTR_VLAN;
+ vfc.vlanid = v->vlanid;
+ ionic_set_vf_config(ionic, i, &vfc);
+ vfc.vlanid = 0;
+ }
+
+ if (v->maxrate) {
+ vfc.attr = IONIC_VF_ATTR_RATE;
+ vfc.maxrate = v->maxrate;
+ ionic_set_vf_config(ionic, i, &vfc);
+ vfc.maxrate = 0;
+ }
+
+ if (v->spoofchk) {
+ vfc.attr = IONIC_VF_ATTR_SPOOFCHK;
+ vfc.spoofchk = v->spoofchk;
+ ionic_set_vf_config(ionic, i, &vfc);
+ vfc.spoofchk = 0;
+ }
+
+ if (v->trusted) {
+ vfc.attr = IONIC_VF_ATTR_TRUST;
+ vfc.trust = v->trusted;
+ ionic_set_vf_config(ionic, i, &vfc);
+ vfc.trust = 0;
+ }
+
+ if (v->linkstate) {
+ vfc.attr = IONIC_VF_ATTR_LINKSTATE;
+ vfc.linkstate = v->linkstate;
+ ionic_set_vf_config(ionic, i, &vfc);
+ vfc.linkstate = 0;
+ }
+ }
+
+ up_read(&ionic->vf_op_lock);
+
+ ionic_vf_start(ionic);
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
@@ -3042,6 +3119,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
if (err)
goto err_qcqs_free;
+ ionic_vf_attr_replay(lif);
+
if (lif->registered)
ionic_lif_set_netdev_info(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 5456c2b15d9b..a13530ec4dd8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -533,7 +533,7 @@ int ionic_identify(struct ionic *ionic)
sz = min(sizeof(ident->drv), sizeof(idev->dev_cmd_regs->data));
memcpy_toio(&idev->dev_cmd_regs->data, &ident->drv, sz);
- ionic_dev_cmd_identify(idev, IONIC_IDENTITY_VERSION_1);
+ ionic_dev_cmd_identify(idev, IONIC_DEV_IDENTITY_VERSION_2);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
if (!err) {
sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data));
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index c03986bf2628..0c3977416cd1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -348,16 +348,25 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_rxq_sg_desc *sg_desc;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_buf_info *buf_info;
+ unsigned int fill_threshold;
struct ionic_rxq_desc *desc;
unsigned int remain_len;
unsigned int frag_len;
unsigned int nfrags;
+ unsigned int n_fill;
unsigned int i, j;
unsigned int len;
+ n_fill = ionic_q_space_avail(q);
+
+ fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
+ q->num_descs / IONIC_RX_FILL_DIV);
+ if (n_fill < fill_threshold)
+ return;
+
len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
- for (i = ionic_q_space_avail(q); i; i--) {
+ for (i = n_fill; i; i--) {
nfrags = 0;
remain_len = len;
desc_info = &q->info[q->head_idx];
@@ -511,7 +520,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *cq = napi_to_cq(napi);
struct ionic_dev *idev;
struct ionic_lif *lif;
- u16 rx_fill_threshold;
u32 work_done = 0;
u32 flags = 0;
@@ -521,10 +529,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done = ionic_cq_service(cq, budget,
ionic_rx_service, NULL, NULL);
- rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
- cq->num_descs / IONIC_RX_FILL_DIV);
- if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
- ionic_rx_fill(cq->bound_q);
+ ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
@@ -550,7 +555,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
- u16 rx_fill_threshold;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
@@ -565,10 +569,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
- rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
- rxcq->num_descs / IONIC_RX_FILL_DIV);
- if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
- ionic_rx_fill(rxcq->bound_q);
+ ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(qcq, 0);
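With the hunks above, ionic_rx_fill() gates itself: it only refills once the free descriptor space reaches min(IONIC_RX_FILL_THRESHOLD, num_descs / IONIC_RX_FILL_DIV), so the NAPI handlers can call it unconditionally. A small sketch of that gate, using hypothetical values for the two constants (the real ones live in the ionic headers):

#include <stdbool.h>

/* Hypothetical stand-ins for IONIC_RX_FILL_THRESHOLD / IONIC_RX_FILL_DIV. */
#define RX_FILL_THRESHOLD 16
#define RX_FILL_DIV        8

/* Refill only when enough descriptors are free to make the doorbell
 * worthwhile, e.g. with 64 descriptors the threshold is min(16, 8) = 8.
 */
static bool rx_fill_worthwhile(unsigned int space_avail, unsigned int num_descs)
{
	unsigned int threshold = RX_FILL_THRESHOLD;

	if (num_descs / RX_FILL_DIV < threshold)
		threshold = num_descs / RX_FILL_DIV;

	return space_avail >= threshold;
}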
@@ -925,8 +926,12 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
len = skb->len;
mss = skb_shinfo(skb)->gso_size;
- outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
- (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
+ outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM));
has_vlan = !!skb_vlan_tag_present(skb);
vlan_tci = skb_vlan_tag_get(skb);
encap = skb->encapsulation;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 5250d1d1e49c..86ecb080b153 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1972,9 +1972,10 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
u8 split_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
+ u8 port_id = 0, pf_id = 0, vf_id = 0;
bool read_using_dmae = false;
u32 thresh;
+ u16 fid;
if (!dump)
return len;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 6bb4e165b592..922c47797af6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -162,10 +162,6 @@ static int qed_devlink_info_get(struct devlink *devlink,
dev_info = &cdev->common_dev_info;
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
-
memcpy(buf, cdev->hwfns[0].hw_info.part_num, sizeof(cdev->hwfns[0].hw_info.part_num));
buf[sizeof(cdev->hwfns[0].hw_info.part_num)] = 0;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index c9c8225f04d6..747cc5e2bb78 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -28,16 +28,19 @@ struct qede_ptp {
};
/**
- * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter.
+ * qede_ptp_adjfine() - Adjust the frequency of the PTP cycle counter.
*
* @info: The PTP clock info structure.
- * @ppb: Parts per billion adjustment from base.
+ * @scaled_ppm: Scaled parts per million adjustment from base.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*
* Return: Zero on success, negative errno otherwise.
*/
-static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
+static int qede_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
struct qede_dev *edev = ptp->edev;
int rc;
@@ -47,7 +50,7 @@ static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
rc = ptp->ops->adjfreq(edev->cdev, ppb);
spin_unlock_bh(&ptp->lock);
} else {
- DP_ERR(edev, "PTP adjfreq called while interface is down\n");
+ DP_ERR(edev, "PTP adjfine called while interface is down\n");
rc = -EFAULT;
}
__qede_unlock(edev);
@@ -462,7 +465,7 @@ int qede_ptp_enable(struct qede_dev *edev)
ptp->clock_info.n_ext_ts = 0;
ptp->clock_info.n_per_out = 0;
ptp->clock_info.pps = 0;
- ptp->clock_info.adjfreq = qede_ptp_adjfreq;
+ ptp->clock_info.adjfine = qede_ptp_adjfine;
ptp->clock_info.adjtime = qede_ptp_adjtime;
ptp->clock_info.gettime64 = qede_ptp_gettime;
ptp->clock_info.settime64 = qede_ptp_settime;
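The adjfine() conversions here take scaled_ppm: parts per million with a 16-bit binary fraction, so 65536 scaled ppm is 1 ppm (1000 ppb). qede converts it with scaled_ppm_to_ppb() before reusing its ppb-based firmware call, while ravb scales its clock addend with adjust_by_scaled_ppm(). A rough sketch of the same arithmetic (illustrative, not the kernel's exact fixed-point code):

#include <stdint.h>

/* ppb = scaled_ppm * 1000 / 2^16; e.g. scaled_ppm = 65536 -> 1000 ppb. */
static int64_t scaled_ppm_to_ppb_sketch(long scaled_ppm)
{
	return ((int64_t)scaled_ppm * 1000) / 65536;
}

/* Scale a clock addend by the requested fraction, as ravb_ptp_adjfine()
 * does via adjust_by_scaled_ppm(): diff = base * ppm / 1e6.
 */
static uint32_t adjust_addend_sketch(uint32_t base, long scaled_ppm)
{
	int64_t diff = ((int64_t)base * scaled_ppm) / (65536LL * 1000000LL);

	return (uint32_t)((int64_t)base + diff);
}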
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 1b2119b1d48a..3f5e6572d20e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -135,9 +135,9 @@ static void rmnet_get_stats64(struct net_device *dev,
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+ start = u64_stats_fetch_begin(&pcpu_ptr->syncp);
snapshot = pcpu_ptr->stats; /* struct assignment */
- } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+ } while (u64_stats_fetch_retry(&pcpu_ptr->syncp, start));
total_stats.rx_pkts += snapshot.rx_pkts;
total_stats.rx_bytes += snapshot.rx_bytes;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 469e2e229c6e..9ce0e8a64ba8 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2532,16 +2532,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
netdev_stats_to_stats64(stats, &dev->stats);
do {
- start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
+ start = u64_stats_fetch_begin(&tp->rx_stats.syncp);
stats->rx_packets = tp->rx_stats.packets;
stats->rx_bytes = tp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&tp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
+ start = u64_stats_fetch_begin(&tp->tx_stats.syncp);
stats->tx_packets = tp->tx_stats.packets;
stats->tx_bytes = tp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&tp->tx_stats.syncp, start));
}
/* Set or clear the multicast filter for this adaptor.
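Both hunks above keep the usual u64_stats reader loop and only drop the _irq suffix from the fetch helpers; the pattern itself is unchanged: retry the snapshot until the sequence count is stable. A minimal sketch (the struct and field names are hypothetical; the begin/retry calls are the real API used above):

    #include <linux/u64_stats_sync.h>

    struct example_stats {
            u64 packets;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    static void example_read_stats(const struct example_stats *s,
                                   u64 *pkts, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    *pkts = s->packets;
                    *bytes = s->bytes;
            } while (u64_stats_fetch_retry(&s->syncp, start));
    }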
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a73d061d9fcb..a9dcc98b6af1 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4559,12 +4559,13 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
+ struct device *d = tp_to_dev(tp);
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
- pm_request_resume(&tp->pci_dev->dev);
+ pm_request_resume(d);
} else {
- pm_runtime_idle(&tp->pci_dev->dev);
+ pm_runtime_idle(d);
}
phy_print_status(tp->phydev);
@@ -5018,7 +5019,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
return -EUNATCH;
}
- tp->phydev->mac_managed_pm = 1;
+ tp->phydev->mac_managed_pm = true;
phy_support_asym_pause(tp->phydev);
@@ -5282,6 +5283,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ netdev_sw_irq_coalesce_default_on(dev);
+
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
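The new netdev_sw_irq_coalesce_default_on() call opts r8169 into software interrupt coalescing by default; conceptually it just seeds the per-device NAPI deferral knobs before the netdev is registered. A rough sketch of the effect (the exact default values are an assumption, not taken from this patch):

    /* Hypothetical illustration of what the helper amounts to; the real
     * helper lives in the core networking code and its defaults may differ.
     */
    static void example_sw_irq_coalesce_on(struct net_device *dev)
    {
            dev->gro_flush_timeout = 20000;   /* ns, assumed default */
            dev->napi_defer_hard_irqs = 1;    /* assumed default */
    }

Both values can still be overridden per device through sysfs after the interface is registered.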
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 8008b2f45934..3ceb57408ed0 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -42,4 +42,16 @@ config RAVB
This driver supports the following SoCs:
- R8A779x.
+config RENESAS_ETHER_SWITCH
+ tristate "Renesas Ethernet Switch support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
+ select MII
+ select PHYLINK
+ help
+ Renesas Ethernet Switch device driver.
+ This driver supports the following SoCs:
+ - R8A779Fx.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index f21ab8c02af0..592005893464 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -8,3 +8,7 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
obj-$(CONFIG_RAVB) += ravb.o
+
+rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o
+
+obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 87c4306d66ec..6e4ef7af27bf 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -88,24 +88,17 @@ static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
}
/* PTP clock operations */
-static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int ravb_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ravb_private *priv = container_of(ptp, struct ravb_private,
ptp.info);
struct net_device *ndev = priv->ndev;
unsigned long flags;
- u32 diff, addend;
- bool neg_adj = false;
+ u32 addend;
u32 gccr;
- if (ppb < 0) {
- neg_adj = true;
- ppb = -ppb;
- }
- addend = priv->ptp.default_addend;
- diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
-
- addend = neg_adj ? addend - diff : addend + diff;
+ addend = (u32)adjust_by_scaled_ppm(priv->ptp.default_addend,
+ scaled_ppm);
spin_lock_irqsave(&priv->lock, flags);
@@ -295,7 +288,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
.n_per_out = N_PER_OUT,
- .adjfreq = ravb_ptp_adjfreq,
+ .adjfine = ravb_ptp_adjfine,
.adjtime = ravb_ptp_adjtime,
.gettime64 = ravb_ptp_gettime64,
.settime64 = ravb_ptp_settime64,
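Here the open-coded neg_adj/diff computation is replaced by adjust_by_scaled_ppm(), which scales the nominal addend by the requested offset. A sketch of the equivalent arithmetic (illustrative only, not the kernel helper's implementation):

    #include <linux/math64.h>

    static u32 example_adjust_addend(u32 default_addend, long scaled_ppm)
    {
            bool neg = scaled_ppm < 0;
            u64 diff;

            if (neg)
                    scaled_ppm = -scaled_ppm;

            /* diff = addend * |scaled_ppm| / (10^6 * 2^16) */
            diff = div64_u64((u64)default_addend * scaled_ppm,
                             1000000ULL << 16);

            return neg ? default_addend - diff : default_addend + diff;
    }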
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
new file mode 100644
index 000000000000..c007e33c47e1
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Renesas R-Car Gen4 gPTP device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "rcar_gen4_ptp.h"
+#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
+
+static const struct rcar_gen4_ptp_reg_offset s4_offs = {
+ .enable = PTPTMEC,
+ .disable = PTPTMDC,
+ .increment = PTPTIVC0,
+ .config_t0 = PTPTOVC00,
+ .config_t1 = PTPTOVC10,
+ .config_t2 = PTPTOVC20,
+ .monitor_t0 = PTPGPTPTM00,
+ .monitor_t1 = PTPGPTPTM10,
+ .monitor_t2 = PTPGPTPTM20,
+};
+
+static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
+ bool neg_adj = scaled_ppm < 0;
+ s64 addend = ptp_priv->default_addend;
+ s64 diff;
+
+ if (neg_adj)
+ scaled_ppm = -scaled_ppm;
+ diff = div_s64(addend * scaled_ppm_to_ppb(scaled_ppm), NSEC_PER_SEC);
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ iowrite32(addend, ptp_priv->addr + ptp_priv->offs->increment);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static void _rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
+
+ ts->tv_nsec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t0);
+ ts->tv_sec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t1) |
+ ((s64)ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t2) << 32);
+}
+
+static int rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp_priv->lock, flags);
+ _rcar_gen4_ptp_gettime(ptp, ts);
+ spin_unlock_irqrestore(&ptp_priv->lock, flags);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static void _rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
+
+ iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
+ iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t2);
+ iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t1);
+ iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t0);
+ iowrite32(1, ptp_priv->addr + ptp_priv->offs->enable);
+ iowrite32(ts->tv_sec >> 32, ptp_priv->addr + ptp_priv->offs->config_t2);
+ iowrite32(ts->tv_sec, ptp_priv->addr + ptp_priv->offs->config_t1);
+ iowrite32(ts->tv_nsec, ptp_priv->addr + ptp_priv->offs->config_t0);
+}
+
+static int rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp_priv->lock, flags);
+ _rcar_gen4_ptp_settime(ptp, ts);
+ spin_unlock_irqrestore(&ptp_priv->lock, flags);
+
+ return 0;
+}
+
+static int rcar_gen4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
+ struct timespec64 ts;
+ unsigned long flags;
+ s64 now;
+
+ spin_lock_irqsave(&ptp_priv->lock, flags);
+ _rcar_gen4_ptp_gettime(ptp, &ts);
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+ _rcar_gen4_ptp_settime(ptp, &ts);
+ spin_unlock_irqrestore(&ptp_priv->lock, flags);
+
+ return 0;
+}
+
+static int rcar_gen4_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info rcar_gen4_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "rcar_gen4_ptp",
+ .max_adj = 50000000,
+ .adjfine = rcar_gen4_ptp_adjfine,
+ .adjtime = rcar_gen4_ptp_adjtime,
+ .gettime64 = rcar_gen4_ptp_gettime,
+ .settime64 = rcar_gen4_ptp_settime,
+ .enable = rcar_gen4_ptp_enable,
+};
+
+static void rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
+ enum rcar_gen4_ptp_reg_layout layout)
+{
+ WARN_ON(layout != RCAR_GEN4_PTP_REG_LAYOUT_S4);
+
+ ptp_priv->offs = &s4_offs;
+}
+
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
+ enum rcar_gen4_ptp_reg_layout layout, u32 clock)
+{
+ if (ptp_priv->initialized)
+ return 0;
+
+ spin_lock_init(&ptp_priv->lock);
+
+ rcar_gen4_ptp_set_offs(ptp_priv, layout);
+
+ ptp_priv->default_addend = clock;
+ iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
+ ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
+ if (IS_ERR(ptp_priv->clock))
+ return PTR_ERR(ptp_priv->clock);
+
+ iowrite32(0x01, ptp_priv->addr + ptp_priv->offs->enable);
+ ptp_priv->initialized = true;
+
+ return 0;
+}
+
+int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
+{
+ iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
+
+ return ptp_clock_unregister(ptp_priv->clock);
+}
+
+struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
+{
+ struct rcar_gen4_ptp_private *ptp;
+
+ ptp = devm_kzalloc(&pdev->dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp)
+ return NULL;
+
+ ptp->info = rcar_gen4_ptp_info;
+
+ return ptp;
+}
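A note on the numbers above: PTPTIVC_INIT (0x19000000) is documented only as "320MHz" in the header that follows. Assuming the increment register holds nanoseconds-per-tick with a 27-bit binary fraction, that value works out exactly: 0x19000000 / 2^27 = 3.125 ns per tick, and 1 / 3.125 ns = 320 MHz, which is the figure rcar_gen4_ptp_adjfine() then nudges up or down. A tiny standalone check of that arithmetic (the fixed-point width is an assumption, not stated in the patch):

    #include <stdio.h>

    int main(void)
    {
            double ns_per_tick = (double)0x19000000 / (1 << 27); /* 3.125 */

            printf("tick = %.3f ns, clock = %.0f MHz\n",
                   ns_per_tick, 1000.0 / ns_per_tick);
            return 0;
    }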
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
new file mode 100644
index 000000000000..b1bbea8d3a52
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Renesas R-Car Gen4 gPTP device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#ifndef __RCAR_GEN4_PTP_H__
+#define __RCAR_GEN4_PTP_H__
+
+#include <linux/ptp_clock_kernel.h>
+
+#define PTPTIVC_INIT 0x19000000 /* 320MHz */
+#define RCAR_GEN4_PTP_CLOCK_S4 PTPTIVC_INIT
+#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
+
+/* for rcar_gen4_ptp_register() */
+enum rcar_gen4_ptp_reg_layout {
+ RCAR_GEN4_PTP_REG_LAYOUT_S4
+};
+
+/* driver's definitions */
+#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0)
+#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1)
+#define RCAR_GEN4_RXTSTAMP_TYPE_ALL (RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT | BIT(2))
+#define RCAR_GEN4_RXTSTAMP_TYPE RCAR_GEN4_RXTSTAMP_TYPE_ALL
+
+#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0)
+
+#define PTPRO 0
+
+enum rcar_gen4_ptp_reg_s4 {
+ PTPTMEC = PTPRO + 0x0010,
+ PTPTMDC = PTPRO + 0x0014,
+ PTPTIVC0 = PTPRO + 0x0020,
+ PTPTOVC00 = PTPRO + 0x0030,
+ PTPTOVC10 = PTPRO + 0x0034,
+ PTPTOVC20 = PTPRO + 0x0038,
+ PTPGPTPTM00 = PTPRO + 0x0050,
+ PTPGPTPTM10 = PTPRO + 0x0054,
+ PTPGPTPTM20 = PTPRO + 0x0058,
+};
+
+struct rcar_gen4_ptp_reg_offset {
+ u16 enable;
+ u16 disable;
+ u16 increment;
+ u16 config_t0;
+ u16 config_t1;
+ u16 config_t2;
+ u16 monitor_t0;
+ u16 monitor_t1;
+ u16 monitor_t2;
+};
+
+struct rcar_gen4_ptp_private {
+ void __iomem *addr;
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ const struct rcar_gen4_ptp_reg_offset *offs;
+ spinlock_t lock; /* For multiple registers access */
+ u32 tstamp_tx_ctrl;
+ u32 tstamp_rx_ctrl;
+ s64 default_addend;
+ bool initialized;
+};
+
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
+ enum rcar_gen4_ptp_reg_layout layout, u32 clock);
+int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
+struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
+
+#endif /* #ifndef __RCAR_GEN4_PTP_H__ */
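The header exposes a small consumer API: allocate the private data, point ->addr at the gPTP register window, then register. A minimal sketch of that flow, mirroring how rswitch.c below drives it (the probe function name is a placeholder):

    #include <linux/platform_device.h>
    #include "rcar_gen4_ptp.h"

    static int example_probe_ptp(struct platform_device *pdev,
                                 void __iomem *base)
    {
            struct rcar_gen4_ptp_private *ptp;
            int err;

            ptp = rcar_gen4_ptp_alloc(pdev);
            if (!ptp)
                    return -ENOMEM;

            /* gPTP registers sit at a fixed offset inside the block */
            ptp->addr = base + RCAR_GEN4_GPTP_OFFSET_S4;

            err = rcar_gen4_ptp_register(ptp, RCAR_GEN4_PTP_REG_LAYOUT_S4,
                                         RCAR_GEN4_PTP_CLOCK_S4);
            if (err)
                    return err;

            /* ... later, on teardown ... */
            return rcar_gen4_ptp_unregister(ptp);
    }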
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
new file mode 100644
index 000000000000..e42ceaa0099f
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -0,0 +1,1841 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "rswitch.h"
+
+static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
+{
+ u32 val;
+
+ return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
+ 1, RSWITCH_TIMEOUT_US);
+}
+
+static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
+{
+ iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
+}
+
+/* Common Agent block (COMA) */
+static void rswitch_reset(struct rswitch_private *priv)
+{
+ iowrite32(RRC_RR, priv->addr + RRC);
+ iowrite32(RRC_RR_CLR, priv->addr + RRC);
+}
+
+static void rswitch_clock_enable(struct rswitch_private *priv)
+{
+ iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
+}
+
+static void rswitch_clock_disable(struct rswitch_private *priv)
+{
+ iowrite32(RCDC_RCD, priv->addr + RCDC);
+}
+
+static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
+{
+ u32 val = ioread32(coma_addr + RCEC);
+
+ if (val & RCEC_RCE)
+ return !!(val & BIT(port));
+ else
+ return false;
+}
+
+static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
+{
+ u32 val;
+
+ if (enable) {
+ val = ioread32(coma_addr + RCEC);
+ iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
+ } else {
+ val = ioread32(coma_addr + RCDC);
+ iowrite32(val | BIT(port), coma_addr + RCDC);
+ }
+}
+
+static int rswitch_bpool_config(struct rswitch_private *priv)
+{
+ u32 val;
+
+ val = ioread32(priv->addr + CABPIRM);
+ if (val & CABPIRM_BPR)
+ return 0;
+
+ iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);
+
+ return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
+}
+
+/* R-Switch-2 block (TOP) */
+static void rswitch_top_init(struct rswitch_private *priv)
+{
+ int i;
+
+ for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
+ iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
+}
+
+/* Forwarding engine block (MFWD) */
+static void rswitch_fwd_init(struct rswitch_private *priv)
+{
+ int i;
+
+ /* For ETHA */
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ iowrite32(0, priv->addr + FWPBFC(i));
+ }
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ iowrite32(priv->rdev[i]->rx_queue->index,
+ priv->addr + FWPBFCSDC(GWCA_INDEX, i));
+ iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
+ }
+
+ /* For GWCA */
+ iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
+ iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
+ iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
+ iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+}
+
+/* gPTP timer (gPTP) */
+static void rswitch_get_timestamp(struct rswitch_private *priv,
+ struct timespec64 *ts)
+{
+ priv->ptp_priv->info.gettime64(&priv->ptp_priv->info, ts);
+}
+
+/* Gateway CPU agent block (GWCA) */
+static int rswitch_gwca_change_mode(struct rswitch_private *priv,
+ enum rswitch_gwca_mode mode)
+{
+ int ret;
+
+ if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
+ rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);
+
+ iowrite32(mode, priv->addr + GWMC);
+
+ ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);
+
+ if (mode == GWMC_OPC_DISABLE)
+ rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);
+
+ return ret;
+}
+
+static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
+{
+ iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);
+
+ return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
+}
+
+static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
+{
+ iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);
+
+ return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
+}
+
+static void rswitch_gwca_set_rate_limit(struct rswitch_private *priv, int rate)
+{
+ u32 gwgrlulc, gwgrlc;
+
+ switch (rate) {
+ case 1000:
+ gwgrlulc = 0x0000005f;
+ gwgrlc = 0x00010260;
+ break;
+ default:
+ dev_err(&priv->pdev->dev, "%s: This rate is not supported (%d)\n", __func__, rate);
+ return;
+ }
+
+ iowrite32(gwgrlulc, priv->addr + GWGRLULC);
+ iowrite32(gwgrlc, priv->addr + GWGRLC);
+}
+
+static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
+{
+ u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
+ int i;
+
+ for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
+ if (dis[i] & mask[i])
+ return true;
+ }
+
+ return false;
+}
+
+static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
+{
+ int i;
+
+ for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
+ dis[i] = ioread32(priv->addr + GWDIS(i));
+ dis[i] &= ioread32(priv->addr + GWDIE(i));
+ }
+}
+
+static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
+{
+ u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);
+
+ iowrite32(BIT(index % 32), priv->addr + offs);
+}
+
+static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
+{
+ u32 offs = GWDIS(index / 32);
+
+ iowrite32(BIT(index % 32), priv->addr + offs);
+}
+
+static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
+{
+ int index = cur ? gq->cur : gq->dirty;
+
+ if (index + num >= gq->ring_size)
+ index = (index + num) % gq->ring_size;
+ else
+ index += num;
+
+ return index;
+}
+
+static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
+{
+ if (gq->cur >= gq->dirty)
+ return gq->cur - gq->dirty;
+ else
+ return gq->ring_size - gq->dirty + gq->cur;
+}
+
+static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
+{
+ struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
+
+ if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
+ return true;
+
+ return false;
+}
+
+static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
+ int start_index, int num)
+{
+ int i, index;
+
+ for (i = 0; i < num; i++) {
+ index = (i + start_index) % gq->ring_size;
+ if (gq->skbs[index])
+ continue;
+ gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
+ PKT_BUF_SZ + RSWITCH_ALIGN - 1);
+ if (!gq->skbs[index])
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i--; i >= 0; i--) {
+ index = (i + start_index) % gq->ring_size;
+ dev_kfree_skb(gq->skbs[index]);
+ gq->skbs[index] = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+static void rswitch_gwca_queue_free(struct net_device *ndev,
+ struct rswitch_gwca_queue *gq)
+{
+ int i;
+
+ if (gq->gptp) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_ts_desc) *
+ (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
+ gq->ts_ring = NULL;
+ } else {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_desc) *
+ (gq->ring_size + 1), gq->ring, gq->ring_dma);
+ gq->ring = NULL;
+ }
+
+ if (!gq->dir_tx) {
+ for (i = 0; i < gq->ring_size; i++)
+ dev_kfree_skb(gq->skbs[i]);
+ }
+
+ kfree(gq->skbs);
+ gq->skbs = NULL;
+}
+
+static int rswitch_gwca_queue_alloc(struct net_device *ndev,
+ struct rswitch_private *priv,
+ struct rswitch_gwca_queue *gq,
+ bool dir_tx, bool gptp, int ring_size)
+{
+ int i, bit;
+
+ gq->dir_tx = dir_tx;
+ gq->gptp = gptp;
+ gq->ring_size = ring_size;
+ gq->ndev = ndev;
+
+ gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
+ if (!gq->skbs)
+ return -ENOMEM;
+
+ if (!dir_tx)
+ rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+
+ if (gptp)
+ gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_ts_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ else
+ gq->ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ if (!gq->ts_ring && !gq->ring)
+ goto out;
+
+ i = gq->index / 32;
+ bit = BIT(gq->index % 32);
+ if (dir_tx)
+ priv->gwca.tx_irq_bits[i] |= bit;
+ else
+ priv->gwca.rx_irq_bits[i] |= bit;
+
+ return 0;
+
+out:
+ rswitch_gwca_queue_free(ndev, gq);
+
+ return -ENOMEM;
+}
+
+static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
+{
+ desc->dptrl = cpu_to_le32(lower_32_bits(addr));
+ desc->dptrh = upper_32_bits(addr) & 0xff;
+}
+
+static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
+{
+ return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
+}
+
+static int rswitch_gwca_queue_format(struct net_device *ndev,
+ struct rswitch_private *priv,
+ struct rswitch_gwca_queue *gq)
+{
+ int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+ struct rswitch_ext_desc *desc;
+ struct rswitch_desc *linkfix;
+ dma_addr_t dma_addr;
+ int i;
+
+ memset(gq->ring, 0, tx_ring_size);
+ for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
+ if (!gq->dir_tx) {
+ dma_addr = dma_map_single(ndev->dev.parent,
+ gq->skbs[i]->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto err;
+
+ desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+ rswitch_desc_set_dptr(&desc->desc, dma_addr);
+ desc->desc.die_dt = DT_FEMPTY | DIE;
+ } else {
+ desc->desc.die_dt = DT_EEMPTY | DIE;
+ }
+ }
+ rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
+ desc->desc.die_dt = DT_LINKFIX;
+
+ linkfix = &priv->linkfix_table[gq->index];
+ linkfix->die_dt = DT_LINKFIX;
+ rswitch_desc_set_dptr(linkfix, gq->ring_dma);
+
+ iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_EDE,
+ priv->addr + GWDCC_OFFS(gq->index));
+
+ return 0;
+
+err:
+ if (!gq->dir_tx) {
+ for (i--, desc = gq->ring; i >= 0; i--, desc++) {
+ dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
+ struct rswitch_gwca_queue *gq,
+ int start_index, int num)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_ext_ts_desc *desc;
+ dma_addr_t dma_addr;
+ int i, index;
+
+ for (i = 0; i < num; i++) {
+ index = (i + start_index) % gq->ring_size;
+ desc = &gq->ts_ring[index];
+ if (!gq->dir_tx) {
+ dma_addr = dma_map_single(ndev->dev.parent,
+ gq->skbs[index]->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto err;
+
+ desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+ rswitch_desc_set_dptr(&desc->desc, dma_addr);
+ dma_wmb();
+ desc->desc.die_dt = DT_FEMPTY | DIE;
+ desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
+ } else {
+ desc->desc.die_dt = DT_EEMPTY | DIE;
+ }
+ }
+
+ return 0;
+
+err:
+ if (!gq->dir_tx) {
+ for (i--; i >= 0; i--) {
+ index = (i + start_index) % gq->ring_size;
+ desc = &gq->ts_ring[index];
+ dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
+ struct rswitch_private *priv,
+ struct rswitch_gwca_queue *gq)
+{
+ int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+ struct rswitch_ext_ts_desc *desc;
+ struct rswitch_desc *linkfix;
+ int err;
+
+ memset(gq->ts_ring, 0, tx_ts_ring_size);
+ err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
+ if (err < 0)
+ return err;
+
+ desc = &gq->ts_ring[gq->ring_size]; /* Last */
+ rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
+ desc->desc.die_dt = DT_LINKFIX;
+
+ linkfix = &priv->linkfix_table[gq->index];
+ linkfix->die_dt = DT_LINKFIX;
+ rswitch_desc_set_dptr(linkfix, gq->ring_dma);
+
+ iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_ETS | GWDCC_EDE,
+ priv->addr + GWDCC_OFFS(gq->index));
+
+ return 0;
+}
+
+static int rswitch_gwca_desc_alloc(struct rswitch_private *priv)
+{
+ int i, num_queues = priv->gwca.num_queues;
+ struct device *dev = &priv->pdev->dev;
+
+ priv->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
+ priv->linkfix_table = dma_alloc_coherent(dev, priv->linkfix_table_size,
+ &priv->linkfix_table_dma, GFP_KERNEL);
+ if (!priv->linkfix_table)
+ return -ENOMEM;
+ for (i = 0; i < num_queues; i++)
+ priv->linkfix_table[i].die_dt = DT_EOS;
+
+ return 0;
+}
+
+static void rswitch_gwca_desc_free(struct rswitch_private *priv)
+{
+ if (priv->linkfix_table)
+ dma_free_coherent(&priv->pdev->dev, priv->linkfix_table_size,
+ priv->linkfix_table, priv->linkfix_table_dma);
+ priv->linkfix_table = NULL;
+}
+
+static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq;
+ int index;
+
+ index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
+ if (index >= priv->gwca.num_queues)
+ return NULL;
+ set_bit(index, priv->gwca.used);
+ gq = &priv->gwca.queues[index];
+ memset(gq, 0, sizeof(*gq));
+ gq->index = index;
+
+ return gq;
+}
+
+static void rswitch_gwca_put(struct rswitch_private *priv,
+ struct rswitch_gwca_queue *gq)
+{
+ clear_bit(gq->index, priv->gwca.used);
+}
+
+static int rswitch_txdmac_alloc(struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_private *priv = rdev->priv;
+ int err;
+
+ rdev->tx_queue = rswitch_gwca_get(priv);
+ if (!rdev->tx_queue)
+ return -EBUSY;
+
+ err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, false,
+ TX_RING_SIZE);
+ if (err < 0) {
+ rswitch_gwca_put(priv, rdev->tx_queue);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rswitch_txdmac_free(struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ rswitch_gwca_queue_free(ndev, rdev->tx_queue);
+ rswitch_gwca_put(rdev->priv, rdev->tx_queue);
+}
+
+static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
+{
+ struct rswitch_device *rdev = priv->rdev[index];
+
+ return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
+}
+
+static int rswitch_rxdmac_alloc(struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_private *priv = rdev->priv;
+ int err;
+
+ rdev->rx_queue = rswitch_gwca_get(priv);
+ if (!rdev->rx_queue)
+ return -EBUSY;
+
+ err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, true,
+ RX_RING_SIZE);
+ if (err < 0) {
+ rswitch_gwca_put(priv, rdev->rx_queue);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rswitch_rxdmac_free(struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ rswitch_gwca_queue_free(ndev, rdev->rx_queue);
+ rswitch_gwca_put(rdev->priv, rdev->rx_queue);
+}
+
+static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
+{
+ struct rswitch_device *rdev = priv->rdev[index];
+ struct net_device *ndev = rdev->ndev;
+
+ return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
+}
+
+static int rswitch_gwca_hw_init(struct rswitch_private *priv)
+{
+ int i, err;
+
+ err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
+ if (err < 0)
+ return err;
+ err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
+ if (err < 0)
+ return err;
+
+ err = rswitch_gwca_mcast_table_reset(priv);
+ if (err < 0)
+ return err;
+ err = rswitch_gwca_axi_ram_reset(priv);
+ if (err < 0)
+ return err;
+
+ iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
+ iowrite32(0, priv->addr + GWTTFC);
+ iowrite32(lower_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC1);
+ iowrite32(upper_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC0);
+ rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ err = rswitch_rxdmac_init(priv, i);
+ if (err < 0)
+ return err;
+ err = rswitch_txdmac_init(priv, i);
+ if (err < 0)
+ return err;
+ }
+
+ err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
+ if (err < 0)
+ return err;
+ return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
+}
+
+static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
+{
+ int err;
+
+ err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
+ if (err < 0)
+ return err;
+ err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
+ if (err < 0)
+ return err;
+
+ return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
+}
+
+static int rswitch_gwca_halt(struct rswitch_private *priv)
+{
+ int err;
+
+ priv->gwca_halt = true;
+ err = rswitch_gwca_hw_deinit(priv);
+ dev_err(&priv->pdev->dev, "halted (%d)\n", err);
+
+ return err;
+}
+
+static bool rswitch_rx(struct net_device *ndev, int *quota)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_queue *gq = rdev->rx_queue;
+ struct rswitch_ext_ts_desc *desc;
+ int limit, boguscnt, num, ret;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 pkt_len;
+ u32 get_ts;
+
+ boguscnt = min_t(int, gq->ring_size, *quota);
+ limit = boguscnt;
+
+ desc = &gq->ts_ring[gq->cur];
+ while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
+ if (--boguscnt < 0)
+ break;
+ dma_rmb();
+ pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
+ skb = gq->skbs[gq->cur];
+ gq->skbs[gq->cur] = NULL;
+ dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ rdev->ndev->stats.rx_packets++;
+ rdev->ndev->stats.rx_bytes += pkt_len;
+
+ gq->cur = rswitch_next_queue_index(gq, true, 1);
+ desc = &gq->ts_ring[gq->cur];
+ }
+
+ num = rswitch_get_num_cur_queues(gq);
+ ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+ if (ret < 0)
+ goto err;
+ ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
+ if (ret < 0)
+ goto err;
+ gq->dirty = rswitch_next_queue_index(gq, false, num);
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+
+err:
+ rswitch_gwca_halt(rdev->priv);
+
+ return 0;
+}
+
+static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_queue *gq = rdev->tx_queue;
+ struct rswitch_ext_desc *desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ int free_num = 0;
+ int size;
+
+ for (; rswitch_get_num_cur_queues(gq) > 0;
+ gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
+ desc = &gq->ring[gq->dirty];
+ if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
+ skb = gq->skbs[gq->dirty];
+ if (skb) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ rswitch_get_timestamp(rdev->priv, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+ dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(gq->skbs[gq->dirty]);
+ gq->skbs[gq->dirty] = NULL;
+ free_num++;
+ }
+ desc->desc.die_dt = DT_EEMPTY;
+ rdev->ndev->stats.tx_packets++;
+ rdev->ndev->stats.tx_bytes += size;
+ }
+
+ return free_num;
+}
+
+static int rswitch_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct rswitch_private *priv;
+ struct rswitch_device *rdev;
+ int quota = budget;
+
+ rdev = netdev_priv(ndev);
+ priv = rdev->priv;
+
+retry:
+ rswitch_tx_free(ndev, true);
+
+ if (rswitch_rx(ndev, &quota))
+ goto out;
+ else if (rdev->priv->gwca_halt)
+ goto err;
+ else if (rswitch_is_queue_rxed(rdev->rx_queue))
+ goto retry;
+
+ netif_wake_subqueue(ndev, 0);
+
+ napi_complete(napi);
+
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+
+out:
+ return budget - quota;
+
+err:
+ napi_complete(napi);
+
+ return 0;
+}
+
+static void rswitch_queue_interrupt(struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ if (napi_schedule_prep(&rdev->napi)) {
+ rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
+ rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ __napi_schedule(&rdev->napi);
+ }
+}
+
+static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
+{
+ struct rswitch_gwca_queue *gq;
+ int i, index, bit;
+
+ for (i = 0; i < priv->gwca.num_queues; i++) {
+ gq = &priv->gwca.queues[i];
+ index = gq->index / 32;
+ bit = BIT(gq->index % 32);
+ if (!(dis[index] & bit))
+ continue;
+
+ rswitch_ack_data_irq(priv, gq->index);
+ rswitch_queue_interrupt(gq->ndev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
+{
+ struct rswitch_private *priv = dev_id;
+ u32 dis[RSWITCH_NUM_IRQ_REGS];
+ irqreturn_t ret = IRQ_NONE;
+
+ rswitch_get_data_irq_status(priv, dis);
+
+ if (rswitch_is_any_data_irq(priv, dis, true) ||
+ rswitch_is_any_data_irq(priv, dis, false))
+ ret = rswitch_data_irq(priv, dis);
+
+ return ret;
+}
+
+static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
+{
+ char *resource_name, *irq_name;
+ int i, ret, irq;
+
+ for (i = 0; i < GWCA_NUM_IRQS; i++) {
+ resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
+ if (!resource_name)
+ return -ENOMEM;
+
+ irq = platform_get_irq_byname(priv->pdev, resource_name);
+ kfree(resource_name);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
+ GWCA_IRQ_NAME, i);
+ if (!irq_name)
+ return -ENOMEM;
+
+ ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
+ 0, irq_name, priv);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
+static int rswitch_etha_change_mode(struct rswitch_etha *etha,
+ enum rswitch_etha_mode mode)
+{
+ int ret;
+
+ if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
+ rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);
+
+ iowrite32(mode, etha->addr + EAMC);
+
+ ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);
+
+ if (mode == EAMC_OPC_DISABLE)
+ rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);
+
+ return ret;
+}
+
+static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
+{
+ u32 mrmac0 = ioread32(etha->addr + MRMAC0);
+ u32 mrmac1 = ioread32(etha->addr + MRMAC1);
+ u8 *mac = &etha->mac_addr[0];
+
+ mac[0] = (mrmac0 >> 8) & 0xFF;
+ mac[1] = (mrmac0 >> 0) & 0xFF;
+ mac[2] = (mrmac1 >> 24) & 0xFF;
+ mac[3] = (mrmac1 >> 16) & 0xFF;
+ mac[4] = (mrmac1 >> 8) & 0xFF;
+ mac[5] = (mrmac1 >> 0) & 0xFF;
+}
+
+static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
+{
+ iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
+ iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
+ etha->addr + MRMAC1);
+}
+
+static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
+{
+ iowrite32(MLVC_PLV, etha->addr + MLVC);
+
+ return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
+}
+
+static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
+{
+ u32 val;
+
+ rswitch_etha_write_mac_address(etha, mac);
+
+ switch (etha->speed) {
+ case 100:
+ val = MPIC_LSC_100M;
+ break;
+ case 1000:
+ val = MPIC_LSC_1G;
+ break;
+ case 2500:
+ val = MPIC_LSC_2_5G;
+ break;
+ default:
+ return;
+ }
+
+ iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
+}
+
+static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
+{
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
+ rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+}
+
+static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
+{
+ int err;
+
+ err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
+ if (err < 0)
+ return err;
+ err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
+ if (err < 0)
+ return err;
+
+ iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
+ rswitch_rmac_setting(etha, mac);
+ rswitch_etha_enable_mii(etha);
+
+ err = rswitch_etha_wait_link_verification(etha);
+ if (err < 0)
+ return err;
+
+ err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
+ if (err < 0)
+ return err;
+
+ return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
+}
+
+static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
+ int phyad, int devad, int regad, int data)
+{
+ int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
+ u32 val;
+ int ret;
+
+ if (devad == 0xffffffff)
+ return -ENODEV;
+
+ writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
+
+ val = MPSM_PSME | MPSM_MFF_C45;
+ iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
+
+ ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ if (ret)
+ return ret;
+
+ rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+
+ if (read) {
+ writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
+
+ ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
+ if (ret)
+ return ret;
+
+ ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
+
+ rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
+ } else {
+ iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
+ etha->addr + MPSM);
+
+ ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ }
+
+ return ret;
+}
+
+static int rswitch_etha_mii_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct rswitch_etha *etha = bus->priv;
+ int mode, devad, regad;
+
+ mode = regnum & MII_ADDR_C45;
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ regad = regnum & MII_REGADDR_C45_MASK;
+
+ /* Clause 22 access method is not supported */
+ if (!mode)
+ return -EOPNOTSUPP;
+
+ return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+}
+
+static int rswitch_etha_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
+ int mode, devad, regad;
+
+ mode = regnum & MII_ADDR_C45;
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ regad = regnum & MII_REGADDR_C45_MASK;
+
+ /* Clause 22 access method is not supported */
+ if (!mode)
+ return -EOPNOTSUPP;
+
+ return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+}
+
+/* Caller must call of_node_put(port) when done with the node */
+static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
+{
+ struct device_node *ports, *port;
+ int err = 0;
+ u32 index;
+
+ ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
+ "ethernet-ports");
+ if (!ports)
+ return NULL;
+
+ for_each_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &index);
+ if (err < 0) {
+ port = NULL;
+ goto out;
+ }
+ if (index == rdev->etha->index)
+ break;
+ }
+
+out:
+ of_node_put(ports);
+
+ return port;
+}
+
+/* Caller must call of_node_put(mdio) when done with the node */
+static struct device_node *rswitch_get_mdio_node(struct rswitch_device *rdev)
+{
+ struct device_node *port, *mdio;
+
+ port = rswitch_get_port_node(rdev);
+ if (!port)
+ return NULL;
+
+ mdio = of_get_child_by_name(port, "mdio");
+ of_node_put(port);
+
+ return mdio;
+}
+
+static int rswitch_etha_get_params(struct rswitch_device *rdev)
+{
+ struct device_node *port;
+ int err;
+
+ port = rswitch_get_port_node(rdev);
+ if (!port)
+ return -ENODEV;
+
+ err = of_get_phy_mode(port, &rdev->etha->phy_interface);
+ of_node_put(port);
+
+ switch (rdev->etha->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ rdev->etha->speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ rdev->etha->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ rdev->etha->speed = SPEED_2500;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int rswitch_mii_register(struct rswitch_device *rdev)
+{
+ struct device_node *mdio_np;
+ struct mii_bus *mii_bus;
+ int err;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "rswitch_mii";
+ sprintf(mii_bus->id, "etha%d", rdev->etha->index);
+ mii_bus->priv = rdev->etha;
+ mii_bus->read = rswitch_etha_mii_read;
+ mii_bus->write = rswitch_etha_mii_write;
+ mii_bus->parent = &rdev->priv->pdev->dev;
+
+ mdio_np = rswitch_get_mdio_node(rdev);
+ err = of_mdiobus_register(mii_bus, mdio_np);
+ if (err < 0) {
+ mdiobus_free(mii_bus);
+ goto out;
+ }
+
+ rdev->etha->mii = mii_bus;
+
+out:
+ of_node_put(mdio_np);
+
+ return err;
+}
+
+static void rswitch_mii_unregister(struct rswitch_device *rdev)
+{
+ if (rdev->etha->mii) {
+ mdiobus_unregister(rdev->etha->mii);
+ mdiobus_free(rdev->etha->mii);
+ rdev->etha->mii = NULL;
+ }
+}
+
+static void rswitch_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void rswitch_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+}
+
+static void rswitch_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ /* Current hardware cannot change speed at runtime */
+}
+
+static const struct phylink_mac_ops rswitch_phylink_ops = {
+ .mac_config = rswitch_mac_config,
+ .mac_link_down = rswitch_mac_link_down,
+ .mac_link_up = rswitch_mac_link_up,
+};
+
+static int rswitch_phylink_init(struct rswitch_device *rdev)
+{
+ struct device_node *port;
+ struct phylink *phylink;
+ int err;
+
+ port = rswitch_get_port_node(rdev);
+ if (!port)
+ return -ENODEV;
+
+ rdev->phylink_config.dev = &rdev->ndev->dev;
+ rdev->phylink_config.type = PHYLINK_NETDEV;
+ __set_bit(PHY_INTERFACE_MODE_SGMII, rdev->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII, rdev->phylink_config.supported_interfaces);
+ rdev->phylink_config.mac_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+
+ phylink = phylink_create(&rdev->phylink_config, &port->fwnode,
+ rdev->etha->phy_interface, &rswitch_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto out;
+ }
+
+ rdev->phylink = phylink;
+ err = phylink_of_phy_connect(rdev->phylink, port, rdev->etha->phy_interface);
+out:
+ of_node_put(port);
+
+ return err;
+}
+
+static void rswitch_phylink_deinit(struct rswitch_device *rdev)
+{
+ rtnl_lock();
+ phylink_disconnect_phy(rdev->phylink);
+ rtnl_unlock();
+ phylink_destroy(rdev->phylink);
+}
+
+static int rswitch_serdes_set_params(struct rswitch_device *rdev)
+{
+ struct device_node *port = rswitch_get_port_node(rdev);
+ struct phy *serdes;
+ int err;
+
+ serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
+ of_node_put(port);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
+ rdev->etha->phy_interface);
+ if (err < 0)
+ return err;
+
+ return phy_set_speed(serdes, rdev->etha->speed);
+}
+
+static int rswitch_serdes_init(struct rswitch_device *rdev)
+{
+ struct device_node *port = rswitch_get_port_node(rdev);
+ struct phy *serdes;
+
+ serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
+ of_node_put(port);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ return phy_init(serdes);
+}
+
+static int rswitch_serdes_deinit(struct rswitch_device *rdev)
+{
+ struct device_node *port = rswitch_get_port_node(rdev);
+ struct phy *serdes;
+
+ serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
+ of_node_put(port);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
+
+ return phy_exit(serdes);
+}
+
+static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
+{
+ int err;
+
+ if (!rdev->etha->operated) {
+ err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
+ if (err < 0)
+ return err;
+ rdev->etha->operated = true;
+ }
+
+ err = rswitch_mii_register(rdev);
+ if (err < 0)
+ return err;
+
+ err = rswitch_phylink_init(rdev);
+ if (err < 0)
+ goto err_phylink_init;
+
+ err = rswitch_serdes_set_params(rdev);
+ if (err < 0)
+ goto err_serdes_set_params;
+
+ return 0;
+
+err_serdes_set_params:
+ rswitch_phylink_deinit(rdev);
+
+err_phylink_init:
+ rswitch_mii_unregister(rdev);
+
+ return err;
+}
+
+static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
+{
+ rswitch_phylink_deinit(rdev);
+ rswitch_mii_unregister(rdev);
+}
+
+static int rswitch_ether_port_init_all(struct rswitch_private *priv)
+{
+ int i, err;
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ err = rswitch_ether_port_init_one(priv->rdev[i]);
+ if (err)
+ goto err_init_one;
+ }
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ err = rswitch_serdes_init(priv->rdev[i]);
+ if (err)
+ goto err_serdes;
+ }
+
+ return 0;
+
+err_serdes:
+ for (i--; i >= 0; i--)
+ rswitch_serdes_deinit(priv->rdev[i]);
+ i = RSWITCH_NUM_PORTS;
+
+err_init_one:
+ for (i--; i >= 0; i--)
+ rswitch_ether_port_deinit_one(priv->rdev[i]);
+
+ return err;
+}
+
+static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
+{
+ int i;
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_serdes_deinit(priv->rdev[i]);
+ rswitch_ether_port_deinit_one(priv->rdev[i]);
+ }
+}
+
+static int rswitch_open(struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ phylink_start(rdev->phylink);
+
+ napi_enable(&rdev->napi);
+ netif_start_queue(ndev);
+
+ rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+
+ return 0;
+};
+
+static int rswitch_stop(struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ netif_tx_stop_all_queues(ndev);
+
+ rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
+ rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+
+ phylink_stop(rdev->phylink);
+ napi_disable(&rdev->napi);
+
+ return 0;
+};
+
+static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_queue *gq = rdev->tx_queue;
+ struct rswitch_ext_desc *desc;
+ int ret = NETDEV_TX_OK;
+ dma_addr_t dma_addr;
+
+ if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
+ netif_stop_subqueue(ndev, 0);
+ return ret;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return ret;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ gq->skbs[gq->cur] = skb;
+ desc = &gq->ring[gq->cur];
+ rswitch_desc_set_dptr(&desc->desc, dma_addr);
+ desc->desc.info_ds = cpu_to_le16(skb->len);
+
+ desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_FMT);
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ rdev->ts_tag++;
+ desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
+ }
+ skb_tx_timestamp(skb);
+
+ dma_wmb();
+
+ desc->desc.die_dt = DT_FSINGLE | DIE;
+ wmb(); /* gq->cur must be incremented after die_dt was set */
+
+ gq->cur = rswitch_next_queue_index(gq, true, 1);
+ rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
+
+ return ret;
+}
+
+static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
+{
+ return &ndev->stats;
+}
+
+static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct hwtstamp_config config;
+
+ ptp_priv = rdev->priv->ptp_priv;
+
+ config.flags = 0;
+ config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+ HWTSTAMP_TX_OFF;
+ switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
+ case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ }
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+}
+
+static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
+ struct hwtstamp_config config;
+ u32 tstamp_tx_ctrl;
+
+ if (copy_from_user(&config, req->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ break;
+ }
+
+ rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+}
+
+static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return rswitch_hwstamp_get(ndev, req);
+ case SIOCSHWTSTAMP:
+ return rswitch_hwstamp_set(ndev, req);
+ default:
+ return phylink_mii_ioctl(rdev->phylink, req, cmd);
+ }
+}
+
+static const struct net_device_ops rswitch_netdev_ops = {
+ .ndo_open = rswitch_open,
+ .ndo_stop = rswitch_stop,
+ .ndo_start_xmit = rswitch_start_xmit,
+ .ndo_get_stats = rswitch_get_stats,
+ .ndo_eth_ioctl = rswitch_eth_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops rswitch_ethtool_ops = {
+ .get_ts_info = rswitch_get_ts_info,
+};
+
+static const struct of_device_id renesas_eth_sw_of_table[] = {
+ { .compatible = "renesas,r8a779f0-ether-switch", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
+
+static void rswitch_etha_init(struct rswitch_private *priv, int index)
+{
+ struct rswitch_etha *etha = &priv->etha[index];
+
+ memset(etha, 0, sizeof(*etha));
+ etha->index = index;
+ etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
+ etha->coma_addr = priv->addr;
+}
+
+static int rswitch_device_alloc(struct rswitch_private *priv, int index)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct rswitch_device *rdev;
+ struct net_device *ndev;
+ int err;
+
+ if (index >= RSWITCH_NUM_PORTS)
+ return -EINVAL;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ether_setup(ndev);
+
+ rdev = netdev_priv(ndev);
+ rdev->ndev = ndev;
+ rdev->priv = priv;
+ priv->rdev[index] = rdev;
+ rdev->port = index;
+ rdev->etha = &priv->etha[index];
+ rdev->addr = priv->addr;
+
+ ndev->base_addr = (unsigned long)rdev->addr;
+ snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
+ ndev->netdev_ops = &rswitch_netdev_ops;
+ ndev->ethtool_ops = &rswitch_ethtool_ops;
+
+ netif_napi_add(ndev, &rdev->napi, rswitch_poll);
+
+ err = of_get_ethdev_address(pdev->dev.of_node, ndev);
+ if (err) {
+ if (is_valid_ether_addr(rdev->etha->mac_addr))
+ eth_hw_addr_set(ndev, rdev->etha->mac_addr);
+ else
+ eth_hw_addr_random(ndev);
+ }
+
+ err = rswitch_etha_get_params(rdev);
+ if (err < 0)
+ goto out_get_params;
+
+ if (rdev->priv->gwca.speed < rdev->etha->speed)
+ rdev->priv->gwca.speed = rdev->etha->speed;
+
+ err = rswitch_rxdmac_alloc(ndev);
+ if (err < 0)
+ goto out_rxdmac;
+
+ err = rswitch_txdmac_alloc(ndev);
+ if (err < 0)
+ goto out_txdmac;
+
+ return 0;
+
+out_txdmac:
+ rswitch_rxdmac_free(ndev);
+
+out_rxdmac:
+out_get_params:
+ netif_napi_del(&rdev->napi);
+ free_netdev(ndev);
+
+ return err;
+}
+
+static void rswitch_device_free(struct rswitch_private *priv, int index)
+{
+ struct rswitch_device *rdev = priv->rdev[index];
+ struct net_device *ndev = rdev->ndev;
+
+ rswitch_txdmac_free(ndev);
+ rswitch_rxdmac_free(ndev);
+ netif_napi_del(&rdev->napi);
+ free_netdev(ndev);
+}
+
+static int rswitch_init(struct rswitch_private *priv)
+{
+ int i, err;
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ rswitch_etha_init(priv, i);
+
+ rswitch_clock_enable(priv);
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ rswitch_etha_read_mac_address(&priv->etha[i]);
+
+ rswitch_reset(priv);
+
+ rswitch_clock_enable(priv);
+ rswitch_top_init(priv);
+ err = rswitch_bpool_config(priv);
+ if (err < 0)
+ return err;
+
+ err = rswitch_gwca_desc_alloc(priv);
+ if (err < 0)
+ return -ENOMEM;
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ err = rswitch_device_alloc(priv, i);
+ if (err < 0) {
+ for (i--; i >= 0; i--)
+ rswitch_device_free(priv, i);
+ goto err_device_alloc;
+ }
+ }
+
+ rswitch_fwd_init(priv);
+
+ err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
+ RCAR_GEN4_PTP_CLOCK_S4);
+ if (err < 0)
+ goto err_ptp_register;
+
+ err = rswitch_gwca_request_irqs(priv);
+ if (err < 0)
+ goto err_gwca_request_irq;
+
+ err = rswitch_gwca_hw_init(priv);
+ if (err < 0)
+ goto err_gwca_hw_init;
+
+ err = rswitch_ether_port_init_all(priv);
+ if (err)
+ goto err_ether_port_init_all;
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ err = register_netdev(priv->rdev[i]->ndev);
+ if (err) {
+ for (i--; i >= 0; i--)
+ unregister_netdev(priv->rdev[i]->ndev);
+ goto err_register_netdev;
+ }
+ }
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
+ priv->rdev[i]->ndev->dev_addr);
+
+ return 0;
+
+err_register_netdev:
+ rswitch_ether_port_deinit_all(priv);
+
+err_ether_port_init_all:
+ rswitch_gwca_hw_deinit(priv);
+
+err_gwca_hw_init:
+err_gwca_request_irq:
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+
+err_ptp_register:
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ rswitch_device_free(priv, i);
+
+err_device_alloc:
+ rswitch_gwca_desc_free(priv);
+
+ return err;
+}
+
+static int renesas_eth_sw_probe(struct platform_device *pdev)
+{
+ struct rswitch_private *priv;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+ if (!priv->ptp_priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ priv->pdev = pdev;
+ priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->addr))
+ return PTR_ERR(priv->addr);
+
+ priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret < 0) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0)
+ return ret;
+ }
+
+ priv->gwca.index = AGENT_INDEX_GWCA;
+ priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
+ RSWITCH_MAX_NUM_QUEUES);
+ priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
+ sizeof(*priv->gwca.queues), GFP_KERNEL);
+ if (!priv->gwca.queues)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = rswitch_init(priv);
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ return ret;
+}
+
+static void rswitch_deinit(struct rswitch_private *priv)
+{
+ int i;
+
+ rswitch_gwca_hw_deinit(priv);
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ struct rswitch_device *rdev = priv->rdev[i];
+
+ rswitch_serdes_deinit(rdev);
+ rswitch_ether_port_deinit_one(rdev);
+ unregister_netdev(rdev->ndev);
+ rswitch_device_free(priv, i);
+ }
+
+ rswitch_gwca_desc_free(priv);
+
+ rswitch_clock_disable(priv);
+}
+
+static int renesas_eth_sw_remove(struct platform_device *pdev)
+{
+ struct rswitch_private *priv = platform_get_drvdata(pdev);
+
+ rswitch_deinit(priv);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver renesas_eth_sw_driver_platform = {
+ .probe = renesas_eth_sw_probe,
+ .remove = renesas_eth_sw_remove,
+ .driver = {
+ .name = "renesas_eth_sw",
+ .of_match_table = renesas_eth_sw_of_table,
+ }
+};
+module_platform_driver(renesas_eth_sw_driver_platform);
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
+MODULE_LICENSE("GPL");
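Note on the unwind pattern: both allocation loops in rswitch_init() above roll back only the iterations that already succeeded (the inner "for (i--; i >= 0; i--)" loop) before falling through to the shared error labels, which then tear down the earlier stages in reverse order. Below is a minimal, self-contained sketch of the same idiom; alloc_one()/free_one() are stand-ins, not driver functions.

    /* Sketch only: illustrates the partial-unwind idiom used in rswitch_init().
     * alloc_one()/free_one() are placeholders, not part of the driver.
     */
    #include <stdlib.h>

    #define NOBJS 8
    static void *objs[NOBJS];

    static int alloc_one(int i)
    {
            objs[i] = malloc(16);
            return objs[i] ? 0 : -1;
    }

    static void free_one(int i)
    {
            free(objs[i]);
            objs[i] = NULL;
    }

    static int alloc_all(void)
    {
            int i, err;

            for (i = 0; i < NOBJS; i++) {
                    err = alloc_one(i);
                    if (err < 0) {
                            /* undo only what has been set up so far */
                            for (i--; i >= 0; i--)
                                    free_one(i);
                            return err;
                    }
            }
            return 0;
    }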
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
new file mode 100644
index 000000000000..edbdd1b98d3d
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -0,0 +1,973 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#ifndef __RSWITCH_H__
+#define __RSWITCH_H__
+
+#include <linux/platform_device.h>
+#include "rcar_gen4_ptp.h"
+
+#define RSWITCH_MAX_NUM_QUEUES 128
+
+#define RSWITCH_NUM_PORTS 3
+
+#define TX_RING_SIZE 1024
+#define RX_RING_SIZE 1024
+
+#define PKT_BUF_SZ 1584
+#define RSWITCH_ALIGN 128
+#define RSWITCH_MAX_CTAG_PCP 7
+
+#define RSWITCH_TIMEOUT_US 100000
+
+#define RSWITCH_TOP_OFFSET 0x00008000
+#define RSWITCH_COMA_OFFSET 0x00009000
+#define RSWITCH_ETHA_OFFSET 0x0000a000 /* with RMAC */
+#define RSWITCH_ETHA_SIZE 0x00002000 /* with RMAC */
+#define RSWITCH_GWCA0_OFFSET 0x00010000
+#define RSWITCH_GWCA1_OFFSET 0x00012000
+
+/* TODO: hardcoded ETHA/GWCA settings for now */
+#define GWCA_IRQ_RESOURCE_NAME "gwca0_rxtx%d"
+#define GWCA_IRQ_NAME "rswitch: gwca0_rxtx%d"
+#define GWCA_NUM_IRQS 8
+#define GWCA_INDEX 0
+#define AGENT_INDEX_GWCA 3
+#define GWRO RSWITCH_GWCA0_OFFSET
+
+#define FWRO 0
+#define TPRO RSWITCH_TOP_OFFSET
+#define CARO RSWITCH_COMA_OFFSET
+#define TARO 0
+#define RMRO 0x1000
+enum rswitch_reg {
+ FWGC = FWRO + 0x0000,
+ FWTTC0 = FWRO + 0x0010,
+ FWTTC1 = FWRO + 0x0014,
+ FWLBMC = FWRO + 0x0018,
+ FWCEPTC = FWRO + 0x0020,
+ FWCEPRC0 = FWRO + 0x0024,
+ FWCEPRC1 = FWRO + 0x0028,
+ FWCEPRC2 = FWRO + 0x002c,
+ FWCLPTC = FWRO + 0x0030,
+ FWCLPRC = FWRO + 0x0034,
+ FWCMPTC = FWRO + 0x0040,
+ FWEMPTC = FWRO + 0x0044,
+ FWSDMPTC = FWRO + 0x0050,
+ FWSDMPVC = FWRO + 0x0054,
+ FWLBWMC0 = FWRO + 0x0080,
+ FWPC00 = FWRO + 0x0100,
+ FWPC10 = FWRO + 0x0104,
+ FWPC20 = FWRO + 0x0108,
+ FWCTGC00 = FWRO + 0x0400,
+ FWCTGC10 = FWRO + 0x0404,
+ FWCTTC00 = FWRO + 0x0408,
+ FWCTTC10 = FWRO + 0x040c,
+ FWCTTC200 = FWRO + 0x0410,
+ FWCTSC00 = FWRO + 0x0420,
+ FWCTSC10 = FWRO + 0x0424,
+ FWCTSC20 = FWRO + 0x0428,
+ FWCTSC30 = FWRO + 0x042c,
+ FWCTSC40 = FWRO + 0x0430,
+ FWTWBFC0 = FWRO + 0x1000,
+ FWTWBFVC0 = FWRO + 0x1004,
+ FWTHBFC0 = FWRO + 0x1400,
+ FWTHBFV0C0 = FWRO + 0x1404,
+ FWTHBFV1C0 = FWRO + 0x1408,
+ FWFOBFC0 = FWRO + 0x1800,
+ FWFOBFV0C0 = FWRO + 0x1804,
+ FWFOBFV1C0 = FWRO + 0x1808,
+ FWRFC0 = FWRO + 0x1c00,
+ FWRFVC0 = FWRO + 0x1c04,
+ FWCFC0 = FWRO + 0x2000,
+ FWCFMC00 = FWRO + 0x2004,
+ FWIP4SC = FWRO + 0x4008,
+ FWIP6SC = FWRO + 0x4018,
+ FWIP6OC = FWRO + 0x401c,
+ FWL2SC = FWRO + 0x4020,
+ FWSFHEC = FWRO + 0x4030,
+ FWSHCR0 = FWRO + 0x4040,
+ FWSHCR1 = FWRO + 0x4044,
+ FWSHCR2 = FWRO + 0x4048,
+ FWSHCR3 = FWRO + 0x404c,
+ FWSHCR4 = FWRO + 0x4050,
+ FWSHCR5 = FWRO + 0x4054,
+ FWSHCR6 = FWRO + 0x4058,
+ FWSHCR7 = FWRO + 0x405c,
+ FWSHCR8 = FWRO + 0x4060,
+ FWSHCR9 = FWRO + 0x4064,
+ FWSHCR10 = FWRO + 0x4068,
+ FWSHCR11 = FWRO + 0x406c,
+ FWSHCR12 = FWRO + 0x4070,
+ FWSHCR13 = FWRO + 0x4074,
+ FWSHCRR = FWRO + 0x4078,
+ FWLTHHEC = FWRO + 0x4090,
+ FWLTHHC = FWRO + 0x4094,
+ FWLTHTL0 = FWRO + 0x40a0,
+ FWLTHTL1 = FWRO + 0x40a4,
+ FWLTHTL2 = FWRO + 0x40a8,
+ FWLTHTL3 = FWRO + 0x40ac,
+ FWLTHTL4 = FWRO + 0x40b0,
+ FWLTHTL5 = FWRO + 0x40b4,
+ FWLTHTL6 = FWRO + 0x40b8,
+ FWLTHTL7 = FWRO + 0x40bc,
+ FWLTHTL80 = FWRO + 0x40c0,
+ FWLTHTL9 = FWRO + 0x40d0,
+ FWLTHTLR = FWRO + 0x40d4,
+ FWLTHTIM = FWRO + 0x40e0,
+ FWLTHTEM = FWRO + 0x40e4,
+ FWLTHTS0 = FWRO + 0x4100,
+ FWLTHTS1 = FWRO + 0x4104,
+ FWLTHTS2 = FWRO + 0x4108,
+ FWLTHTS3 = FWRO + 0x410c,
+ FWLTHTS4 = FWRO + 0x4110,
+ FWLTHTSR0 = FWRO + 0x4120,
+ FWLTHTSR1 = FWRO + 0x4124,
+ FWLTHTSR2 = FWRO + 0x4128,
+ FWLTHTSR3 = FWRO + 0x412c,
+ FWLTHTSR40 = FWRO + 0x4130,
+ FWLTHTSR5 = FWRO + 0x4140,
+ FWLTHTR = FWRO + 0x4150,
+ FWLTHTRR0 = FWRO + 0x4154,
+ FWLTHTRR1 = FWRO + 0x4158,
+ FWLTHTRR2 = FWRO + 0x415c,
+ FWLTHTRR3 = FWRO + 0x4160,
+ FWLTHTRR4 = FWRO + 0x4164,
+ FWLTHTRR5 = FWRO + 0x4168,
+ FWLTHTRR6 = FWRO + 0x416c,
+ FWLTHTRR7 = FWRO + 0x4170,
+ FWLTHTRR8 = FWRO + 0x4174,
+ FWLTHTRR9 = FWRO + 0x4180,
+ FWLTHTRR10 = FWRO + 0x4190,
+ FWIPHEC = FWRO + 0x4214,
+ FWIPHC = FWRO + 0x4218,
+ FWIPTL0 = FWRO + 0x4220,
+ FWIPTL1 = FWRO + 0x4224,
+ FWIPTL2 = FWRO + 0x4228,
+ FWIPTL3 = FWRO + 0x422c,
+ FWIPTL4 = FWRO + 0x4230,
+ FWIPTL5 = FWRO + 0x4234,
+ FWIPTL6 = FWRO + 0x4238,
+ FWIPTL7 = FWRO + 0x4240,
+ FWIPTL8 = FWRO + 0x4250,
+ FWIPTLR = FWRO + 0x4254,
+ FWIPTIM = FWRO + 0x4260,
+ FWIPTEM = FWRO + 0x4264,
+ FWIPTS0 = FWRO + 0x4270,
+ FWIPTS1 = FWRO + 0x4274,
+ FWIPTS2 = FWRO + 0x4278,
+ FWIPTS3 = FWRO + 0x427c,
+ FWIPTS4 = FWRO + 0x4280,
+ FWIPTSR0 = FWRO + 0x4284,
+ FWIPTSR1 = FWRO + 0x4288,
+ FWIPTSR2 = FWRO + 0x428c,
+ FWIPTSR3 = FWRO + 0x4290,
+ FWIPTSR4 = FWRO + 0x42a0,
+ FWIPTR = FWRO + 0x42b0,
+ FWIPTRR0 = FWRO + 0x42b4,
+ FWIPTRR1 = FWRO + 0x42b8,
+ FWIPTRR2 = FWRO + 0x42bc,
+ FWIPTRR3 = FWRO + 0x42c0,
+ FWIPTRR4 = FWRO + 0x42c4,
+ FWIPTRR5 = FWRO + 0x42c8,
+ FWIPTRR6 = FWRO + 0x42cc,
+ FWIPTRR7 = FWRO + 0x42d0,
+ FWIPTRR8 = FWRO + 0x42e0,
+ FWIPTRR9 = FWRO + 0x42f0,
+ FWIPHLEC = FWRO + 0x4300,
+ FWIPAGUSPC = FWRO + 0x4500,
+ FWIPAGC = FWRO + 0x4504,
+ FWIPAGM0 = FWRO + 0x4510,
+ FWIPAGM1 = FWRO + 0x4514,
+ FWIPAGM2 = FWRO + 0x4518,
+ FWIPAGM3 = FWRO + 0x451c,
+ FWIPAGM4 = FWRO + 0x4520,
+ FWMACHEC = FWRO + 0x4620,
+ FWMACHC = FWRO + 0x4624,
+ FWMACTL0 = FWRO + 0x4630,
+ FWMACTL1 = FWRO + 0x4634,
+ FWMACTL2 = FWRO + 0x4638,
+ FWMACTL3 = FWRO + 0x463c,
+ FWMACTL4 = FWRO + 0x4640,
+ FWMACTL5 = FWRO + 0x4650,
+ FWMACTLR = FWRO + 0x4654,
+ FWMACTIM = FWRO + 0x4660,
+ FWMACTEM = FWRO + 0x4664,
+ FWMACTS0 = FWRO + 0x4670,
+ FWMACTS1 = FWRO + 0x4674,
+ FWMACTSR0 = FWRO + 0x4678,
+ FWMACTSR1 = FWRO + 0x467c,
+ FWMACTSR2 = FWRO + 0x4680,
+ FWMACTSR3 = FWRO + 0x4690,
+ FWMACTR = FWRO + 0x46a0,
+ FWMACTRR0 = FWRO + 0x46a4,
+ FWMACTRR1 = FWRO + 0x46a8,
+ FWMACTRR2 = FWRO + 0x46ac,
+ FWMACTRR3 = FWRO + 0x46b0,
+ FWMACTRR4 = FWRO + 0x46b4,
+ FWMACTRR5 = FWRO + 0x46c0,
+ FWMACTRR6 = FWRO + 0x46d0,
+ FWMACHLEC = FWRO + 0x4700,
+ FWMACAGUSPC = FWRO + 0x4880,
+ FWMACAGC = FWRO + 0x4884,
+ FWMACAGM0 = FWRO + 0x4888,
+ FWMACAGM1 = FWRO + 0x488c,
+ FWVLANTEC = FWRO + 0x4900,
+ FWVLANTL0 = FWRO + 0x4910,
+ FWVLANTL1 = FWRO + 0x4914,
+ FWVLANTL2 = FWRO + 0x4918,
+ FWVLANTL3 = FWRO + 0x4920,
+ FWVLANTL4 = FWRO + 0x4930,
+ FWVLANTLR = FWRO + 0x4934,
+ FWVLANTIM = FWRO + 0x4940,
+ FWVLANTEM = FWRO + 0x4944,
+ FWVLANTS = FWRO + 0x4950,
+ FWVLANTSR0 = FWRO + 0x4954,
+ FWVLANTSR1 = FWRO + 0x4958,
+ FWVLANTSR2 = FWRO + 0x4960,
+ FWVLANTSR3 = FWRO + 0x4970,
+ FWPBFC0 = FWRO + 0x4a00,
+ FWPBFCSDC00 = FWRO + 0x4a04,
+ FWL23URL0 = FWRO + 0x4e00,
+ FWL23URL1 = FWRO + 0x4e04,
+ FWL23URL2 = FWRO + 0x4e08,
+ FWL23URL3 = FWRO + 0x4e0c,
+ FWL23URLR = FWRO + 0x4e10,
+ FWL23UTIM = FWRO + 0x4e20,
+ FWL23URR = FWRO + 0x4e30,
+ FWL23URRR0 = FWRO + 0x4e34,
+ FWL23URRR1 = FWRO + 0x4e38,
+ FWL23URRR2 = FWRO + 0x4e3c,
+ FWL23URRR3 = FWRO + 0x4e40,
+ FWL23URMC0 = FWRO + 0x4f00,
+ FWPMFGC0 = FWRO + 0x5000,
+ FWPGFC0 = FWRO + 0x5100,
+ FWPGFIGSC0 = FWRO + 0x5104,
+ FWPGFENC0 = FWRO + 0x5108,
+ FWPGFENM0 = FWRO + 0x510c,
+ FWPGFCSTC00 = FWRO + 0x5110,
+ FWPGFCSTC10 = FWRO + 0x5114,
+ FWPGFCSTM00 = FWRO + 0x5118,
+ FWPGFCSTM10 = FWRO + 0x511c,
+ FWPGFCTC0 = FWRO + 0x5120,
+ FWPGFCTM0 = FWRO + 0x5124,
+ FWPGFHCC0 = FWRO + 0x5128,
+ FWPGFSM0 = FWRO + 0x512c,
+ FWPGFGC0 = FWRO + 0x5130,
+ FWPGFGL0 = FWRO + 0x5500,
+ FWPGFGL1 = FWRO + 0x5504,
+ FWPGFGLR = FWRO + 0x5518,
+ FWPGFGR = FWRO + 0x5510,
+ FWPGFGRR0 = FWRO + 0x5514,
+ FWPGFGRR1 = FWRO + 0x5518,
+ FWPGFRIM = FWRO + 0x5520,
+ FWPMTRFC0 = FWRO + 0x5600,
+ FWPMTRCBSC0 = FWRO + 0x5604,
+ FWPMTRC0RC0 = FWRO + 0x5608,
+ FWPMTREBSC0 = FWRO + 0x560c,
+ FWPMTREIRC0 = FWRO + 0x5610,
+ FWPMTRFM0 = FWRO + 0x5614,
+ FWFTL0 = FWRO + 0x6000,
+ FWFTL1 = FWRO + 0x6004,
+ FWFTLR = FWRO + 0x6008,
+ FWFTOC = FWRO + 0x6010,
+ FWFTOPC = FWRO + 0x6014,
+ FWFTIM = FWRO + 0x6020,
+ FWFTR = FWRO + 0x6030,
+ FWFTRR0 = FWRO + 0x6034,
+ FWFTRR1 = FWRO + 0x6038,
+ FWFTRR2 = FWRO + 0x603c,
+ FWSEQNGC0 = FWRO + 0x6100,
+ FWSEQNGM0 = FWRO + 0x6104,
+ FWSEQNRC = FWRO + 0x6200,
+ FWCTFDCN0 = FWRO + 0x6300,
+ FWLTHFDCN0 = FWRO + 0x6304,
+ FWIPFDCN0 = FWRO + 0x6308,
+ FWLTWFDCN0 = FWRO + 0x630c,
+ FWPBFDCN0 = FWRO + 0x6310,
+ FWMHLCN0 = FWRO + 0x6314,
+ FWIHLCN0 = FWRO + 0x6318,
+ FWICRDCN0 = FWRO + 0x6500,
+ FWWMRDCN0 = FWRO + 0x6504,
+ FWCTRDCN0 = FWRO + 0x6508,
+ FWLTHRDCN0 = FWRO + 0x650c,
+ FWIPRDCN0 = FWRO + 0x6510,
+ FWLTWRDCN0 = FWRO + 0x6514,
+ FWPBRDCN0 = FWRO + 0x6518,
+ FWPMFDCN0 = FWRO + 0x6700,
+ FWPGFDCN0 = FWRO + 0x6780,
+ FWPMGDCN0 = FWRO + 0x6800,
+ FWPMYDCN0 = FWRO + 0x6804,
+ FWPMRDCN0 = FWRO + 0x6808,
+ FWFRPPCN0 = FWRO + 0x6a00,
+ FWFRDPCN0 = FWRO + 0x6a04,
+ FWEIS00 = FWRO + 0x7900,
+ FWEIE00 = FWRO + 0x7904,
+ FWEID00 = FWRO + 0x7908,
+ FWEIS1 = FWRO + 0x7a00,
+ FWEIE1 = FWRO + 0x7a04,
+ FWEID1 = FWRO + 0x7a08,
+ FWEIS2 = FWRO + 0x7a10,
+ FWEIE2 = FWRO + 0x7a14,
+ FWEID2 = FWRO + 0x7a18,
+ FWEIS3 = FWRO + 0x7a20,
+ FWEIE3 = FWRO + 0x7a24,
+ FWEID3 = FWRO + 0x7a28,
+ FWEIS4 = FWRO + 0x7a30,
+ FWEIE4 = FWRO + 0x7a34,
+ FWEID4 = FWRO + 0x7a38,
+ FWEIS5 = FWRO + 0x7a40,
+ FWEIE5 = FWRO + 0x7a44,
+ FWEID5 = FWRO + 0x7a48,
+ FWEIS60 = FWRO + 0x7a50,
+ FWEIE60 = FWRO + 0x7a54,
+ FWEID60 = FWRO + 0x7a58,
+ FWEIS61 = FWRO + 0x7a60,
+ FWEIE61 = FWRO + 0x7a64,
+ FWEID61 = FWRO + 0x7a68,
+ FWEIS62 = FWRO + 0x7a70,
+ FWEIE62 = FWRO + 0x7a74,
+ FWEID62 = FWRO + 0x7a78,
+ FWEIS63 = FWRO + 0x7a80,
+ FWEIE63 = FWRO + 0x7a84,
+ FWEID63 = FWRO + 0x7a88,
+ FWEIS70 = FWRO + 0x7a90,
+ FWEIE70 = FWRO + 0x7a94,
+ FWEID70 = FWRO + 0x7a98,
+ FWEIS71 = FWRO + 0x7aa0,
+ FWEIE71 = FWRO + 0x7aa4,
+ FWEID71 = FWRO + 0x7aa8,
+ FWEIS72 = FWRO + 0x7ab0,
+ FWEIE72 = FWRO + 0x7ab4,
+ FWEID72 = FWRO + 0x7ab8,
+ FWEIS73 = FWRO + 0x7ac0,
+ FWEIE73 = FWRO + 0x7ac4,
+ FWEID73 = FWRO + 0x7ac8,
+ FWEIS80 = FWRO + 0x7ad0,
+ FWEIE80 = FWRO + 0x7ad4,
+ FWEID80 = FWRO + 0x7ad8,
+ FWEIS81 = FWRO + 0x7ae0,
+ FWEIE81 = FWRO + 0x7ae4,
+ FWEID81 = FWRO + 0x7ae8,
+ FWEIS82 = FWRO + 0x7af0,
+ FWEIE82 = FWRO + 0x7af4,
+ FWEID82 = FWRO + 0x7af8,
+ FWEIS83 = FWRO + 0x7b00,
+ FWEIE83 = FWRO + 0x7b04,
+ FWEID83 = FWRO + 0x7b08,
+ FWMIS0 = FWRO + 0x7c00,
+ FWMIE0 = FWRO + 0x7c04,
+ FWMID0 = FWRO + 0x7c08,
+ FWSCR0 = FWRO + 0x7d00,
+ FWSCR1 = FWRO + 0x7d04,
+ FWSCR2 = FWRO + 0x7d08,
+ FWSCR3 = FWRO + 0x7d0c,
+ FWSCR4 = FWRO + 0x7d10,
+ FWSCR5 = FWRO + 0x7d14,
+ FWSCR6 = FWRO + 0x7d18,
+ FWSCR7 = FWRO + 0x7d1c,
+ FWSCR8 = FWRO + 0x7d20,
+ FWSCR9 = FWRO + 0x7d24,
+ FWSCR10 = FWRO + 0x7d28,
+ FWSCR11 = FWRO + 0x7d2c,
+ FWSCR12 = FWRO + 0x7d30,
+ FWSCR13 = FWRO + 0x7d34,
+ FWSCR14 = FWRO + 0x7d38,
+ FWSCR15 = FWRO + 0x7d3c,
+ FWSCR16 = FWRO + 0x7d40,
+ FWSCR17 = FWRO + 0x7d44,
+ FWSCR18 = FWRO + 0x7d48,
+ FWSCR19 = FWRO + 0x7d4c,
+ FWSCR20 = FWRO + 0x7d50,
+ FWSCR21 = FWRO + 0x7d54,
+ FWSCR22 = FWRO + 0x7d58,
+ FWSCR23 = FWRO + 0x7d5c,
+ FWSCR24 = FWRO + 0x7d60,
+ FWSCR25 = FWRO + 0x7d64,
+ FWSCR26 = FWRO + 0x7d68,
+ FWSCR27 = FWRO + 0x7d6c,
+ FWSCR28 = FWRO + 0x7d70,
+ FWSCR29 = FWRO + 0x7d74,
+ FWSCR30 = FWRO + 0x7d78,
+ FWSCR31 = FWRO + 0x7d7c,
+ FWSCR32 = FWRO + 0x7d80,
+ FWSCR33 = FWRO + 0x7d84,
+ FWSCR34 = FWRO + 0x7d88,
+ FWSCR35 = FWRO + 0x7d8c,
+ FWSCR36 = FWRO + 0x7d90,
+ FWSCR37 = FWRO + 0x7d94,
+ FWSCR38 = FWRO + 0x7d98,
+ FWSCR39 = FWRO + 0x7d9c,
+ FWSCR40 = FWRO + 0x7da0,
+ FWSCR41 = FWRO + 0x7da4,
+ FWSCR42 = FWRO + 0x7da8,
+ FWSCR43 = FWRO + 0x7dac,
+ FWSCR44 = FWRO + 0x7db0,
+ FWSCR45 = FWRO + 0x7db4,
+ FWSCR46 = FWRO + 0x7db8,
+
+ TPEMIMC0 = TPRO + 0x0000,
+ TPEMIMC1 = TPRO + 0x0004,
+ TPEMIMC2 = TPRO + 0x0008,
+ TPEMIMC3 = TPRO + 0x000c,
+ TPEMIMC4 = TPRO + 0x0010,
+ TPEMIMC5 = TPRO + 0x0014,
+ TPEMIMC60 = TPRO + 0x0080,
+ TPEMIMC70 = TPRO + 0x0100,
+ TSIM = TPRO + 0x0700,
+ TFIM = TPRO + 0x0704,
+ TCIM = TPRO + 0x0708,
+ TGIM0 = TPRO + 0x0710,
+ TGIM1 = TPRO + 0x0714,
+ TEIM0 = TPRO + 0x0720,
+ TEIM1 = TPRO + 0x0724,
+ TEIM2 = TPRO + 0x0728,
+
+ RIPV = CARO + 0x0000,
+ RRC = CARO + 0x0004,
+ RCEC = CARO + 0x0008,
+ RCDC = CARO + 0x000c,
+ RSSIS = CARO + 0x0010,
+ RSSIE = CARO + 0x0014,
+ RSSID = CARO + 0x0018,
+ CABPIBWMC = CARO + 0x0020,
+ CABPWMLC = CARO + 0x0040,
+ CABPPFLC0 = CARO + 0x0050,
+ CABPPWMLC0 = CARO + 0x0060,
+ CABPPPFLC00 = CARO + 0x00a0,
+ CABPULC = CARO + 0x0100,
+ CABPIRM = CARO + 0x0140,
+ CABPPCM = CARO + 0x0144,
+ CABPLCM = CARO + 0x0148,
+ CABPCPM = CARO + 0x0180,
+ CABPMCPM = CARO + 0x0200,
+ CARDNM = CARO + 0x0280,
+ CARDMNM = CARO + 0x0284,
+ CARDCN = CARO + 0x0290,
+ CAEIS0 = CARO + 0x0300,
+ CAEIE0 = CARO + 0x0304,
+ CAEID0 = CARO + 0x0308,
+ CAEIS1 = CARO + 0x0310,
+ CAEIE1 = CARO + 0x0314,
+ CAEID1 = CARO + 0x0318,
+ CAMIS0 = CARO + 0x0340,
+ CAMIE0 = CARO + 0x0344,
+ CAMID0 = CARO + 0x0348,
+ CAMIS1 = CARO + 0x0350,
+ CAMIE1 = CARO + 0x0354,
+ CAMID1 = CARO + 0x0358,
+ CASCR = CARO + 0x0380,
+
+ EAMC = TARO + 0x0000,
+ EAMS = TARO + 0x0004,
+ EAIRC = TARO + 0x0010,
+ EATDQSC = TARO + 0x0014,
+ EATDQC = TARO + 0x0018,
+ EATDQAC = TARO + 0x001c,
+ EATPEC = TARO + 0x0020,
+ EATMFSC0 = TARO + 0x0040,
+ EATDQDC0 = TARO + 0x0060,
+ EATDQM0 = TARO + 0x0080,
+ EATDQMLM0 = TARO + 0x00a0,
+ EACTQC = TARO + 0x0100,
+ EACTDQDC = TARO + 0x0104,
+ EACTDQM = TARO + 0x0108,
+ EACTDQMLM = TARO + 0x010c,
+ EAVCC = TARO + 0x0130,
+ EAVTC = TARO + 0x0134,
+ EATTFC = TARO + 0x0138,
+ EACAEC = TARO + 0x0200,
+ EACC = TARO + 0x0204,
+ EACAIVC0 = TARO + 0x0220,
+ EACAULC0 = TARO + 0x0240,
+ EACOEM = TARO + 0x0260,
+ EACOIVM0 = TARO + 0x0280,
+ EACOULM0 = TARO + 0x02a0,
+ EACGSM = TARO + 0x02c0,
+ EATASC = TARO + 0x0300,
+ EATASENC0 = TARO + 0x0320,
+ EATASCTENC = TARO + 0x0340,
+ EATASENM0 = TARO + 0x0360,
+ EATASCTENM = TARO + 0x0380,
+ EATASCSTC0 = TARO + 0x03a0,
+ EATASCSTC1 = TARO + 0x03a4,
+ EATASCSTM0 = TARO + 0x03a8,
+ EATASCSTM1 = TARO + 0x03ac,
+ EATASCTC = TARO + 0x03b0,
+ EATASCTM = TARO + 0x03b4,
+ EATASGL0 = TARO + 0x03c0,
+ EATASGL1 = TARO + 0x03c4,
+ EATASGLR = TARO + 0x03c8,
+ EATASGR = TARO + 0x03d0,
+ EATASGRR = TARO + 0x03d4,
+ EATASHCC = TARO + 0x03e0,
+ EATASRIRM = TARO + 0x03e4,
+ EATASSM = TARO + 0x03e8,
+ EAUSMFSECN = TARO + 0x0400,
+ EATFECN = TARO + 0x0404,
+ EAFSECN = TARO + 0x0408,
+ EADQOECN = TARO + 0x040c,
+ EADQSECN = TARO + 0x0410,
+ EACKSECN = TARO + 0x0414,
+ EAEIS0 = TARO + 0x0500,
+ EAEIE0 = TARO + 0x0504,
+ EAEID0 = TARO + 0x0508,
+ EAEIS1 = TARO + 0x0510,
+ EAEIE1 = TARO + 0x0514,
+ EAEID1 = TARO + 0x0518,
+ EAEIS2 = TARO + 0x0520,
+ EAEIE2 = TARO + 0x0524,
+ EAEID2 = TARO + 0x0528,
+ EASCR = TARO + 0x0580,
+
+ MPSM = RMRO + 0x0000,
+ MPIC = RMRO + 0x0004,
+ MPIM = RMRO + 0x0008,
+ MIOC = RMRO + 0x0010,
+ MIOM = RMRO + 0x0014,
+ MXMS = RMRO + 0x0018,
+ MTFFC = RMRO + 0x0020,
+ MTPFC = RMRO + 0x0024,
+ MTPFC2 = RMRO + 0x0028,
+ MTPFC30 = RMRO + 0x0030,
+ MTATC0 = RMRO + 0x0050,
+ MTIM = RMRO + 0x0060,
+ MRGC = RMRO + 0x0080,
+ MRMAC0 = RMRO + 0x0084,
+ MRMAC1 = RMRO + 0x0088,
+ MRAFC = RMRO + 0x008c,
+ MRSCE = RMRO + 0x0090,
+ MRSCP = RMRO + 0x0094,
+ MRSCC = RMRO + 0x0098,
+ MRFSCE = RMRO + 0x009c,
+ MRFSCP = RMRO + 0x00a0,
+ MTRC = RMRO + 0x00a4,
+ MRIM = RMRO + 0x00a8,
+ MRPFM = RMRO + 0x00ac,
+ MPFC0 = RMRO + 0x0100,
+ MLVC = RMRO + 0x0180,
+ MEEEC = RMRO + 0x0184,
+ MLBC = RMRO + 0x0188,
+ MXGMIIC = RMRO + 0x0190,
+ MPCH = RMRO + 0x0194,
+ MANC = RMRO + 0x0198,
+ MANM = RMRO + 0x019c,
+ MPLCA1 = RMRO + 0x01a0,
+ MPLCA2 = RMRO + 0x01a4,
+ MPLCA3 = RMRO + 0x01a8,
+ MPLCA4 = RMRO + 0x01ac,
+ MPLCAM = RMRO + 0x01b0,
+ MHDC1 = RMRO + 0x01c0,
+ MHDC2 = RMRO + 0x01c4,
+ MEIS = RMRO + 0x0200,
+ MEIE = RMRO + 0x0204,
+ MEID = RMRO + 0x0208,
+ MMIS0 = RMRO + 0x0210,
+ MMIE0 = RMRO + 0x0214,
+ MMID0 = RMRO + 0x0218,
+ MMIS1 = RMRO + 0x0220,
+ MMIE1 = RMRO + 0x0224,
+ MMID1 = RMRO + 0x0228,
+ MMIS2 = RMRO + 0x0230,
+ MMIE2 = RMRO + 0x0234,
+ MMID2 = RMRO + 0x0238,
+ MMPFTCT = RMRO + 0x0300,
+ MAPFTCT = RMRO + 0x0304,
+ MPFRCT = RMRO + 0x0308,
+ MFCICT = RMRO + 0x030c,
+ MEEECT = RMRO + 0x0310,
+ MMPCFTCT0 = RMRO + 0x0320,
+ MAPCFTCT0 = RMRO + 0x0330,
+ MPCFRCT0 = RMRO + 0x0340,
+ MHDCC = RMRO + 0x0350,
+ MROVFC = RMRO + 0x0354,
+ MRHCRCEC = RMRO + 0x0358,
+ MRXBCE = RMRO + 0x0400,
+ MRXBCP = RMRO + 0x0404,
+ MRGFCE = RMRO + 0x0408,
+ MRGFCP = RMRO + 0x040c,
+ MRBFC = RMRO + 0x0410,
+ MRMFC = RMRO + 0x0414,
+ MRUFC = RMRO + 0x0418,
+ MRPEFC = RMRO + 0x041c,
+ MRNEFC = RMRO + 0x0420,
+ MRFMEFC = RMRO + 0x0424,
+ MRFFMEFC = RMRO + 0x0428,
+ MRCFCEFC = RMRO + 0x042c,
+ MRFCEFC = RMRO + 0x0430,
+ MRRCFEFC = RMRO + 0x0434,
+ MRUEFC = RMRO + 0x043c,
+ MROEFC = RMRO + 0x0440,
+ MRBOEC = RMRO + 0x0444,
+ MTXBCE = RMRO + 0x0500,
+ MTXBCP = RMRO + 0x0504,
+ MTGFCE = RMRO + 0x0508,
+ MTGFCP = RMRO + 0x050c,
+ MTBFC = RMRO + 0x0510,
+ MTMFC = RMRO + 0x0514,
+ MTUFC = RMRO + 0x0518,
+ MTEFC = RMRO + 0x051c,
+
+ GWMC = GWRO + 0x0000,
+ GWMS = GWRO + 0x0004,
+ GWIRC = GWRO + 0x0010,
+ GWRDQSC = GWRO + 0x0014,
+ GWRDQC = GWRO + 0x0018,
+ GWRDQAC = GWRO + 0x001c,
+ GWRGC = GWRO + 0x0020,
+ GWRMFSC0 = GWRO + 0x0040,
+ GWRDQDC0 = GWRO + 0x0060,
+ GWRDQM0 = GWRO + 0x0080,
+ GWRDQMLM0 = GWRO + 0x00a0,
+ GWMTIRM = GWRO + 0x0100,
+ GWMSTLS = GWRO + 0x0104,
+ GWMSTLR = GWRO + 0x0108,
+ GWMSTSS = GWRO + 0x010c,
+ GWMSTSR = GWRO + 0x0110,
+ GWMAC0 = GWRO + 0x0120,
+ GWMAC1 = GWRO + 0x0124,
+ GWVCC = GWRO + 0x0130,
+ GWVTC = GWRO + 0x0134,
+ GWTTFC = GWRO + 0x0138,
+ GWTDCAC00 = GWRO + 0x0140,
+ GWTDCAC10 = GWRO + 0x0144,
+ GWTSDCC0 = GWRO + 0x0160,
+ GWTNM = GWRO + 0x0180,
+ GWTMNM = GWRO + 0x0184,
+ GWAC = GWRO + 0x0190,
+ GWDCBAC0 = GWRO + 0x0194,
+ GWDCBAC1 = GWRO + 0x0198,
+ GWIICBSC = GWRO + 0x019c,
+ GWMDNC = GWRO + 0x01a0,
+ GWTRC0 = GWRO + 0x0200,
+ GWTPC0 = GWRO + 0x0300,
+ GWARIRM = GWRO + 0x0380,
+ GWDCC0 = GWRO + 0x0400,
+ GWAARSS = GWRO + 0x0800,
+ GWAARSR0 = GWRO + 0x0804,
+ GWAARSR1 = GWRO + 0x0808,
+ GWIDAUAS0 = GWRO + 0x0840,
+ GWIDASM0 = GWRO + 0x0880,
+ GWIDASAM00 = GWRO + 0x0900,
+ GWIDASAM10 = GWRO + 0x0904,
+ GWIDACAM00 = GWRO + 0x0980,
+ GWIDACAM10 = GWRO + 0x0984,
+ GWGRLC = GWRO + 0x0a00,
+ GWGRLULC = GWRO + 0x0a04,
+ GWRLIVC0 = GWRO + 0x0a80,
+ GWRLULC0 = GWRO + 0x0a84,
+ GWIDPC = GWRO + 0x0b00,
+ GWIDC0 = GWRO + 0x0c00,
+ GWDIS0 = GWRO + 0x1100,
+ GWDIE0 = GWRO + 0x1104,
+ GWDID0 = GWRO + 0x1108,
+ GWTSDIS = GWRO + 0x1180,
+ GWTSDIE = GWRO + 0x1184,
+ GWTSDID = GWRO + 0x1188,
+ GWEIS0 = GWRO + 0x1190,
+ GWEIE0 = GWRO + 0x1194,
+ GWEID0 = GWRO + 0x1198,
+ GWEIS1 = GWRO + 0x11a0,
+ GWEIE1 = GWRO + 0x11a4,
+ GWEID1 = GWRO + 0x11a8,
+ GWEIS20 = GWRO + 0x1200,
+ GWEIE20 = GWRO + 0x1204,
+ GWEID20 = GWRO + 0x1208,
+ GWEIS3 = GWRO + 0x1280,
+ GWEIE3 = GWRO + 0x1284,
+ GWEID3 = GWRO + 0x1288,
+ GWEIS4 = GWRO + 0x1290,
+ GWEIE4 = GWRO + 0x1294,
+ GWEID4 = GWRO + 0x1298,
+ GWEIS5 = GWRO + 0x12a0,
+ GWEIE5 = GWRO + 0x12a4,
+ GWEID5 = GWRO + 0x12a8,
+ GWSCR0 = GWRO + 0x1800,
+ GWSCR1 = GWRO + 0x1900,
+};
+
+/* ETHA/RMAC */
+enum rswitch_etha_mode {
+ EAMC_OPC_RESET,
+ EAMC_OPC_DISABLE,
+ EAMC_OPC_CONFIG,
+ EAMC_OPC_OPERATION,
+};
+
+#define EAMS_OPS_MASK EAMC_OPC_OPERATION
+
+#define EAVCC_VEM_SC_TAG (0x3 << 16)
+
+#define MPIC_PIS_MII 0x00
+#define MPIC_PIS_GMII 0x02
+#define MPIC_PIS_XGMII 0x04
+#define MPIC_LSC_SHIFT 3
+#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT)
+
+#define MDIO_READ_C45 0x03
+#define MDIO_WRITE_C45 0x01
+
+#define MPSM_PSME BIT(0)
+#define MPSM_MFF_C45 BIT(2)
+#define MPSM_PRD_SHIFT 16
+#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
+
+/* Completion flags */
+#define MMIS1_PAACS BIT(2) /* Address */
+#define MMIS1_PWACS BIT(1) /* Write */
+#define MMIS1_PRACS BIT(0) /* Read */
+#define MMIS1_CLEAR_FLAGS 0xf
+
+#define MPIC_PSMCS_SHIFT 16
+#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
+#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
+
+#define MPIC_PSMHT_SHIFT 24
+#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
+#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+
+#define MLVC_PLV BIT(16)
+
+/* GWCA */
+enum rswitch_gwca_mode {
+ GWMC_OPC_RESET,
+ GWMC_OPC_DISABLE,
+ GWMC_OPC_CONFIG,
+ GWMC_OPC_OPERATION,
+};
+
+#define GWMS_OPS_MASK GWMC_OPC_OPERATION
+
+#define GWMTIRM_MTIOG BIT(0)
+#define GWMTIRM_MTR BIT(1)
+
+#define GWVCC_VEM_SC_TAG (0x3 << 16)
+
+#define GWARIRM_ARIOG BIT(0)
+#define GWARIRM_ARR BIT(1)
+
+#define GWDCC_BALR BIT(24)
+#define GWDCC_DQT BIT(11)
+#define GWDCC_ETS BIT(9)
+#define GWDCC_EDE BIT(8)
+
+#define GWTRC(queue) (GWTRC0 + (queue) / 32 * 4)
+#define GWDCC_OFFS(queue) (GWDCC0 + (queue) * 4)
+
+#define GWDIS(i) (GWDIS0 + (i) * 0x10)
+#define GWDIE(i) (GWDIE0 + (i) * 0x10)
+#define GWDID(i) (GWDID0 + (i) * 0x10)
+
+/* COMA */
+#define RRC_RR BIT(0)
+#define RRC_RR_CLR 0
+#define RCEC_ACE_DEFAULT (BIT(0) | BIT(AGENT_INDEX_GWCA))
+#define RCEC_RCE BIT(16)
+#define RCDC_RCD BIT(16)
+
+#define CABPIRM_BPIOG BIT(0)
+#define CABPIRM_BPR BIT(1)
+
+/* MFWD */
+#define FWPC0_LTHTA BIT(0)
+#define FWPC0_IP4UE BIT(3)
+#define FWPC0_IP4TE BIT(4)
+#define FWPC0_IP4OE BIT(5)
+#define FWPC0_L2SE BIT(9)
+#define FWPC0_IP4EA BIT(10)
+#define FWPC0_IPDSA BIT(12)
+#define FWPC0_IPHLA BIT(18)
+#define FWPC0_MACSDA BIT(20)
+#define FWPC0_MACHLA BIT(26)
+#define FWPC0_MACHMA BIT(27)
+#define FWPC0_VLANSA BIT(28)
+
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
+#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
+ FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
+ FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
+ FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
+#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWPC1_DDE BIT(0)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+
+#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
+
+/* TOP */
+#define TPEMIMC7(queue) (TPEMIMC70 + (queue) * 4)
+
+/* Descriptors */
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+enum TX_DS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum DIE_DT {
+ /* Frame data */
+ DT_FSINGLE = 0x80,
+ DT_FSTART = 0x90,
+ DT_FMID = 0xa0,
+ DT_FEND = 0xb8,
+
+ /* Chain control */
+ DT_LEMPTY = 0xc0,
+ DT_EEMPTY = 0xd0,
+ DT_LINKFIX = 0x00,
+ DT_LINK = 0xe0,
+ DT_EOS = 0xf0,
+ /* HW/SW arbitration */
+ DT_FEMPTY = 0x40,
+ DT_FEMPTY_IS = 0x10,
+ DT_FEMPTY_IC = 0x20,
+ DT_FEMPTY_ND = 0x38,
+ DT_FEMPTY_START = 0x50,
+ DT_FEMPTY_MID = 0x60,
+ DT_FEMPTY_END = 0x70,
+
+ DT_MASK = 0xf0,
+ DIE = 0x08, /* Descriptor Interrupt Enable */
+};
+
+/* Both transmission and reception */
+#define INFO1_FMT BIT(2)
+#define INFO1_TXC BIT(3)
+
+/* For transmission */
+#define INFO1_TSUN(val) ((u64)(val) << 8ULL)
+#define INFO1_CSD0(index) ((u64)(index) << 32ULL)
+#define INFO1_CSD1(index) ((u64)(index) << 40ULL)
+#define INFO1_DV(port_vector) ((u64)(port_vector) << 48ULL)
+
+/* For reception */
+#define INFO1_SPN(port) ((u64)(port) << 36ULL)
+
+struct rswitch_desc {
+ __le16 info_ds; /* Descriptor size */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __u8 dptrh; /* Descriptor pointer MSB */
+ __le32 dptrl; /* Descriptor pointer LSW */
+} __packed;
+
+struct rswitch_ts_desc {
+ struct rswitch_desc desc;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+struct rswitch_ext_desc {
+ struct rswitch_desc desc;
+ __le64 info1;
+} __packed;
+
+struct rswitch_ext_ts_desc {
+ struct rswitch_desc desc;
+ __le64 info1;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+struct rswitch_etha {
+ int index;
+ void __iomem *addr;
+ void __iomem *coma_addr;
+ bool external_phy;
+ struct mii_bus *mii;
+ phy_interface_t phy_interface;
+ u8 mac_addr[MAX_ADDR_LEN];
+ int link;
+ int speed;
+
+ /* This hardware cannot be initialized twice, so this flag is used
+ * to avoid repeated initialization.
+ */
+ bool operated;
+};
+
+/* The datasheet refers to a descriptor "chain" and/or "queue". For naming
+ * consistency, this driver uses "queue".
+ */
+struct rswitch_gwca_queue {
+ int index;
+ bool dir_tx;
+ bool gptp;
+ union {
+ struct rswitch_ext_desc *ring;
+ struct rswitch_ext_ts_desc *ts_ring;
+ };
+ dma_addr_t ring_dma;
+ int ring_size;
+ int cur;
+ int dirty;
+ struct sk_buff **skbs;
+
+ struct net_device *ndev; /* queue to ndev for irq */
+};
+
+#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
+struct rswitch_gwca {
+ int index;
+ struct rswitch_gwca_queue *queues;
+ int num_queues;
+ DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
+ u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
+ u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
+ int speed;
+};
+
+#define NUM_QUEUES_PER_NDEV 2
+struct rswitch_device {
+ struct rswitch_private *priv;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ void __iomem *addr;
+ struct rswitch_gwca_queue *tx_queue;
+ struct rswitch_gwca_queue *rx_queue;
+ u8 ts_tag;
+
+ int port;
+ struct rswitch_etha *etha;
+};
+
+struct rswitch_mfwd_mac_table_entry {
+ int queue_index;
+ unsigned char addr[MAX_ADDR_LEN];
+};
+
+struct rswitch_mfwd {
+ struct rswitch_mfwd_mac_table_entry *mac_table_entries;
+ int num_mac_table_entries;
+};
+
+struct rswitch_private {
+ struct platform_device *pdev;
+ void __iomem *addr;
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rswitch_desc *linkfix_table;
+ dma_addr_t linkfix_table_dma;
+ u32 linkfix_table_size;
+
+ struct rswitch_device *rdev[RSWITCH_NUM_PORTS];
+
+ struct rswitch_gwca gwca;
+ struct rswitch_etha etha[RSWITCH_NUM_PORTS];
+ struct rswitch_mfwd mfwd;
+
+ bool gwca_halt;
+};
+
+#endif /* #ifndef __RSWITCH_H__ */
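The GWDCC_OFFS()/GWDIS()/GWDIE()/GWDID() helpers above map a queue or register index to a register offset (a 4-byte stride for GWDCC, 16 bytes for the interrupt registers), and the INFO1_* helpers shift a TSUN tag, CSD indices and a destination port vector into the 64-bit info1 word of an extended descriptor. A hedged sketch of how they compose follows; the queue number and field values are made up, and example_tx_info1() is not a driver function.

    /* Illustration only, not driver code: compose a register offset and a
     * TX info1 word from the helpers defined in rswitch.h.
     */
    static u64 example_tx_info1(unsigned int queue)
    {
            u32 dcc_offset = GWDCC_OFFS(queue);     /* GWDCC0 + queue * 4 */
            u32 dis_offset = GWDIS(queue / 32);     /* GWDIS0 + reg * 0x10 */
            u64 info1;

            /* TSUN tag 1, CSD0 = queue, port vector with only bit 0 set */
            info1 = INFO1_TSUN(1) | INFO1_CSD0(queue) | INFO1_DV(BIT(0));

            (void)dcc_offset;
            (void)dis_offset;
            return info1;
    }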
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 58cf7cc54f40..826990459fa4 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1821,19 +1821,17 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
const struct ofdpa_fdb_learn_work *lw =
container_of(work, struct ofdpa_fdb_learn_work, work);
bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
- bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
struct switchdev_notifier_fdb_info info = {};
+ enum switchdev_notifier_type event;
info.addr = lw->addr;
info.vid = lw->vid;
+ info.offloaded = !removing;
+ event = removing ? SWITCHDEV_FDB_DEL_TO_BRIDGE :
+ SWITCHDEV_FDB_ADD_TO_BRIDGE;
rtnl_lock();
- if (learned && removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- lw->ofdpa_port->dev, &info.info, NULL);
- else if (learned && !removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- lw->ofdpa_port->dev, &info.info, NULL);
+ call_switchdev_notifiers(event, lw->ofdpa_port->dev, &info.info, NULL);
rtnl_unlock();
kfree(work);
@@ -1865,6 +1863,9 @@ static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
if (!ofdpa_port_is_bridged(ofdpa_port))
return 0;
+ if (!(flags & OFDPA_OP_FLAG_LEARNED))
+ return 0;
+
lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
if (!lw)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index b5e45fc6337e..712a48d00069 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -9,7 +9,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
- mae.o tc.o tc_bindings.o
+ mae.o tc.o tc_bindings.o tc_counters.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 135ece2f1375..702abbe59b76 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -43,8 +43,6 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_sset_count = efx_ethtool_get_sset_count,
- .get_priv_flags = efx_ethtool_get_priv_flags,
- .set_priv_flags = efx_ethtool_set_priv_flags,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.get_link_ksettings = efx_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 65bbe37753e6..83d9db71d7d7 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -21,7 +21,7 @@
/* Get the value of a field in the RX prefix */
#define PREFIX_OFFSET_W(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
#define PREFIX_OFFSET_B(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
-#define PREFIX_WIDTH_MASK(_f) ((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
+#define PREFIX_WIDTH_MASK(_f) ((1ULL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
#define PREFIX_WORD(_p, _f) le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
#define PREFIX_FIELD(_p, _f) ((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
PREFIX_WIDTH_MASK(_f))
@@ -67,6 +67,13 @@ void __ef100_rx_packet(struct efx_channel *channel)
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
+ if (channel->type->receive_raw) {
+ u32 mark = PREFIX_FIELD(prefix, USER_MARK);
+
+ if (channel->type->receive_raw(rx_queue, mark))
+ return; /* packet was consumed */
+ }
+
if (ef100_has_fcs_error(channel, prefix) &&
unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
goto out;
@@ -183,24 +190,32 @@ void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
void ef100_rx_write(struct efx_rx_queue *rx_queue)
{
+ unsigned int notified_count = rx_queue->notified_count;
struct efx_rx_buffer *rx_buf;
unsigned int idx;
efx_qword_t *rxd;
efx_dword_t rxdb;
- while (rx_queue->notified_count != rx_queue->added_count) {
- idx = rx_queue->notified_count & rx_queue->ptr_mask;
+ while (notified_count != rx_queue->added_count) {
+ idx = notified_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, idx);
rxd = efx_rx_desc(rx_queue, idx);
EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);
- ++rx_queue->notified_count;
+ ++notified_count;
}
+ if (notified_count == rx_queue->notified_count)
+ return;
wmb();
EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
rx_queue->added_count & rx_queue->ptr_mask);
efx_writed_page(rx_queue->efx, &rxdb,
ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
+ if (rx_queue->grant_credits)
+ wmb();
+ rx_queue->notified_count = notified_count;
+ if (rx_queue->grant_credits)
+ schedule_work(&rx_queue->grant_work);
}
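The hunk above defers credit granting to a work item: ef100_rx_write() advances notified_count and, when grant_credits is set, schedules grant_work, whose handler (efx_mae_counters_grant_credits(), added to mae.c later in this patch) grants the difference between notified_count and granted_count. A plain-C stand-in for that accounting, assuming the MCDI call always succeeds:

    /* Illustration of the credit accounting used above; not driver code. */
    #include <stdio.h>

    struct credit_state {
            unsigned int notified_count;
            unsigned int granted_count;
    };

    /* Called from the deferred work; returns how many credits were granted. */
    static unsigned int grant_credits(struct credit_state *s)
    {
            unsigned int credits = s->notified_count - s->granted_count;

            /* in the driver this is an MCDI call that may fail; assume success */
            s->granted_count += credits;
            return credits;
    }

    int main(void)
    {
            struct credit_state s = { .notified_count = 12, .granted_count = 4 };

            printf("granted %u credits\n", grant_credits(&s));
            return 0;
    }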
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 102ddc7e206a..29ffaf35559d 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -367,7 +367,8 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
* Returns 0 on success, error code otherwise. In case of an error this
* function will free the SKB.
*/
-int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
{
return __ef100_enqueue_skb(tx_queue, skb, NULL);
}
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index aaa381743bca..fcea3ea809d7 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -1119,6 +1119,8 @@ void efx_start_channels(struct efx_nic *efx)
struct efx_channel *channel;
efx_for_each_channel_rev(channel, efx) {
+ if (channel->type->start)
+ channel->type->start(channel);
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue);
atomic_inc(&efx->active_queues);
@@ -1143,8 +1145,13 @@ void efx_stop_channels(struct efx_nic *efx)
struct efx_channel *channel;
int rc = 0;
- /* Stop RX refill */
+ /* Stop special channels and RX refill.
+ * The channel's stop has to be called first, since it might wait
+ * for a sentinel RX to indicate the channel has fully drained.
+ */
efx_for_each_channel(channel, efx) {
+ if (channel->type->stop)
+ channel->type->stop(channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
rx_queue->refill_enabled = false;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index c2224e41a694..cc30524c2fe4 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1164,7 +1164,7 @@ static ssize_t mcdi_logging_show(struct device *dev,
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+ return sysfs_emit(buf, "%d\n", mcdi->logging_enabled);
}
static ssize_t mcdi_logging_store(struct device *dev,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 6649a2327d03..a8cbceeb301b 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -101,14 +101,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
-static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "log-tc-errors",
-};
-
-#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(0)
-
-#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings)
-
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
@@ -460,8 +452,6 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
- case ETH_SS_PRIV_FLAGS:
- return EFX_ETHTOOL_PRIV_FLAGS_COUNT;
default:
return -EINVAL;
}
@@ -488,39 +478,12 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
- case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- efx_ethtool_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- break;
default:
/* No other string sets */
break;
}
}
-u32 efx_ethtool_get_priv_flags(struct net_device *net_dev)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- u32 ret_flags = 0;
-
- if (efx->log_tc_errs)
- ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS;
-
- return ret_flags;
-}
-
-int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
-
- efx->log_tc_errs =
- !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS);
-
- return 0;
-}
-
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 0afc74021a5e..659491932101 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -27,8 +27,6 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
u8 *strings);
-u32 efx_ethtool_get_priv_flags(struct net_device *net_dev);
-int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags);
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats __attribute__ ((unused)),
u64 *data);
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 874c765b2465..583baf69981c 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -112,6 +112,117 @@ int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
return 0;
}
+int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTERS_STREAM_START_OUT_LEN);
+ u32 out_flags;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_WORD(inbuf, MAE_COUNTERS_STREAM_START_V2_IN_QID,
+ efx_rx_queue_index(rx_queue));
+ MCDI_SET_WORD(inbuf, MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE,
+ efx->net_dev->mtu);
+ MCDI_SET_DWORD(inbuf, MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK,
+ BIT(MAE_COUNTER_TYPE_AR) | BIT(MAE_COUNTER_TYPE_CT) |
+ BIT(MAE_COUNTER_TYPE_OR));
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTERS_STREAM_START,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ out_flags = MCDI_DWORD(outbuf, MAE_COUNTERS_STREAM_START_OUT_FLAGS);
+ if (out_flags & BIT(MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "MAE counter stream uses credits\n");
+ rx_queue->grant_credits = true;
+ out_flags &= ~BIT(MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST);
+ }
+ if (out_flags) {
+ netif_err(efx, drv, efx->net_dev,
+ "MAE counter stream start: unrecognised flags %x\n",
+ out_flags);
+ goto out_stop;
+ }
+ return 0;
+out_stop:
+ efx_mae_stop_counters(efx, rx_queue);
+ return -EOPNOTSUPP;
+}
+
+static bool efx_mae_counters_flushed(u32 *flush_gen, u32 *seen_gen)
+{
+ int i;
+
+ for (i = 0; i < EFX_TC_COUNTER_TYPE_MAX; i++)
+ if ((s32)(flush_gen[i] - seen_gen[i]) > 0)
+ return false;
+ return true;
+}
+
+int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_LEN);
+ size_t outlen;
+ int rc, i;
+
+ MCDI_SET_WORD(inbuf, MAE_COUNTERS_STREAM_STOP_IN_QID,
+ efx_rx_queue_index(rx_queue));
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTERS_STREAM_STOP,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ if (rc)
+ return rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "Draining counters:\n");
+ /* Only process received generation counts */
+ for (i = 0; (i < (outlen / 4)) && (i < EFX_TC_COUNTER_TYPE_MAX); i++) {
+ efx->tc->flush_gen[i] = MCDI_ARRAY_DWORD(outbuf,
+ MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT,
+ i);
+ netif_dbg(efx, drv, efx->net_dev,
+ "\ttype %u, awaiting gen %u\n", i,
+ efx->tc->flush_gen[i]);
+ }
+
+ efx->tc->flush_counters = true;
+
+ /* Drain can take up to 2 seconds owing to FWRIVERHD-2884; whatever
+ * timeout we use, that delay is added to unload on nonresponsive
+ * hardware, so 2500ms seems like a reasonable compromise.
+ */
+ if (!wait_event_timeout(efx->tc->flush_wq,
+ efx_mae_counters_flushed(efx->tc->flush_gen,
+ efx->tc->seen_gen),
+ msecs_to_jiffies(2500)))
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to drain counters RXQ, FW may be unhappy\n");
+
+ efx->tc->flush_counters = false;
+
+ return rc;
+}
+
+void efx_mae_counters_grant_credits(struct work_struct *work)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN);
+ struct efx_rx_queue *rx_queue = container_of(work, struct efx_rx_queue,
+ grant_work);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int credits;
+
+ BUILD_BUG_ON(MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT_LEN);
+ credits = READ_ONCE(rx_queue->notified_count) - rx_queue->granted_count;
+ MCDI_SET_DWORD(inbuf, MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS,
+ credits);
+ if (!efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS,
+ inbuf, sizeof(inbuf), NULL, 0, NULL))
+ rx_queue->granted_count += credits;
+}
+
static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
@@ -250,6 +361,32 @@ static int efx_mae_match_check_cap_typ(u8 support, enum mask_type typ)
}
}
+/* Validate field mask against hardware capabilities. Captures caller's 'rc' */
+#define CHECK(_mcdi, _field) ({ \
+ enum mask_type typ = classify_mask((const u8 *)&mask->_field, \
+ sizeof(mask->_field)); \
+ \
+ rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_ ## _mcdi],\
+ typ); \
+ if (rc) \
+ NL_SET_ERR_MSG_FMT_MOD(extack, \
+ "No support for %s mask in field %s", \
+ mask_type_name(typ), #_field); \
+ rc; \
+})
+/* Booleans need special handling */
+#define CHECK_BIT(_mcdi, _field) ({ \
+ enum mask_type typ = mask->_field ? MASK_ONES : MASK_ZEROES; \
+ \
+ rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_ ## _mcdi],\
+ typ); \
+ if (rc) \
+ NL_SET_ERR_MSG_FMT_MOD(extack, \
+ "No support for %s mask in field %s", \
+ mask_type_name(typ), #_field); \
+ rc; \
+})
+
int efx_mae_match_check_caps(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack)
@@ -265,11 +402,86 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
- efx_tc_err(efx, "No support for %s mask in field ingress_port\n",
- mask_type_name(ingress_port_mask_type));
- NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port");
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field ingress_port",
+ mask_type_name(ingress_port_mask_type));
return rc;
}
+ if (CHECK(ETHER_TYPE, eth_proto) ||
+ CHECK(VLAN0_TCI, vlan_tci[0]) ||
+ CHECK(VLAN0_PROTO, vlan_proto[0]) ||
+ CHECK(VLAN1_TCI, vlan_tci[1]) ||
+ CHECK(VLAN1_PROTO, vlan_proto[1]) ||
+ CHECK(ETH_SADDR, eth_saddr) ||
+ CHECK(ETH_DADDR, eth_daddr) ||
+ CHECK(IP_PROTO, ip_proto) ||
+ CHECK(IP_TOS, ip_tos) ||
+ CHECK(IP_TTL, ip_ttl) ||
+ CHECK(SRC_IP4, src_ip) ||
+ CHECK(DST_IP4, dst_ip) ||
+#ifdef CONFIG_IPV6
+ CHECK(SRC_IP6, src_ip6) ||
+ CHECK(DST_IP6, dst_ip6) ||
+#endif
+ CHECK(L4_SPORT, l4_sport) ||
+ CHECK(L4_DPORT, l4_dport) ||
+ CHECK(TCP_FLAGS, tcp_flags) ||
+ CHECK_BIT(IS_IP_FRAG, ip_frag) ||
+ CHECK_BIT(IP_FIRST_FRAG, ip_firstfrag) ||
+ CHECK(RECIRC_ID, recirc_id))
+ return rc;
+ return 0;
+}
+#undef CHECK_BIT
+#undef CHECK
+
+int efx_mae_allocate_counter(struct efx_nic *efx, struct efx_tc_counter *cnt)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ if (!cnt)
+ return -EINVAL;
+
+ MCDI_SET_DWORD(inbuf, MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT, 1);
+ MCDI_SET_DWORD(inbuf, MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE, cnt->type);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTER_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ /* pcol says this can't happen, since count is 1 */
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ cnt->fw_id = MCDI_DWORD(outbuf, MAE_COUNTER_ALLOC_OUT_COUNTER_ID);
+ cnt->gen = MCDI_DWORD(outbuf, MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT);
+ return 0;
+}
+
+int efx_mae_free_counter(struct efx_nic *efx, struct efx_tc_counter *cnt)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTER_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTER_FREE_V2_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT, 1);
+ MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID, cnt->fw_id);
+ MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE, cnt->type);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTER_FREE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ /* pcol says this can't happen, since count is 1 */
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what counters exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID) !=
+ cnt->fw_id))
+ return -EIO;
return 0;
}
@@ -289,8 +501,12 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
- MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
- MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+ if (act->count && !WARN_ON(!act->count->cnt))
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
+ act->count->cnt->fw_id);
+ else
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
+ MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
@@ -440,10 +656,90 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
+ EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS),
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
+ match->value.ip_frag,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
+ match->value.ip_firstfrag);
+ EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK),
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
+ match->mask.ip_frag,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
+ match->mask.ip_firstfrag);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
match->value.recirc_id);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
match->mask.recirc_id);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE,
+ match->value.eth_proto);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK,
+ match->mask.eth_proto);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE,
+ match->value.vlan_tci[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK,
+ match->mask.vlan_tci[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE,
+ match->value.vlan_proto[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK,
+ match->mask.vlan_proto[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE,
+ match->value.vlan_tci[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK,
+ match->mask.vlan_tci[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE,
+ match->value.vlan_proto[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK,
+ match->mask.vlan_proto[1]);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE),
+ match->value.eth_saddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK),
+ match->mask.eth_saddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE),
+ match->value.eth_daddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK),
+ match->mask.eth_daddr, ETH_ALEN);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO,
+ match->value.ip_proto);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK,
+ match->mask.ip_proto);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS,
+ match->value.ip_tos);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK,
+ match->mask.ip_tos);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL,
+ match->value.ip_ttl);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK,
+ match->mask.ip_ttl);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE,
+ match->value.src_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK,
+ match->mask.src_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE,
+ match->value.dst_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK,
+ match->mask.dst_ip);
+#ifdef CONFIG_IPV6
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE),
+ &match->value.src_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK),
+ &match->mask.src_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE),
+ &match->value.dst_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK),
+ &match->mask.dst_ip6, sizeof(struct in6_addr));
+#endif
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE,
+ match->value.l4_sport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK,
+ match->mask.l4_sport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE,
+ match->value.l4_dport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK,
+ match->mask.l4_dport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE,
+ match->value.tcp_flags);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK,
+ match->mask.tcp_flags);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 3e0cd238d523..72343e90e222 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -27,6 +27,10 @@ void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
+int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
+void efx_mae_counters_grant_credits(struct work_struct *work);
+
#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
struct mae_caps {
@@ -41,6 +45,9 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack);
+int efx_mae_allocate_counter(struct efx_nic *efx, struct efx_tc_counter *cnt);
+int efx_mae_free_counter(struct efx_nic *efx, struct efx_tc_counter *cnt);
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
diff --git a/drivers/net/ethernet/sfc/mae_counter_format.h b/drivers/net/ethernet/sfc/mae_counter_format.h
new file mode 100644
index 000000000000..7e252e393fbe
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mae_counter_format.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2020 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Format of counter packets (version 2) from the ef100 Match-Action Engine */
+
+#ifndef EFX_MAE_COUNTER_FORMAT_H
+#define EFX_MAE_COUNTER_FORMAT_H
+
+
+/*------------------------------------------------------------*/
+/*
+ * ER_RX_SL_PACKETISER_HEADER_WORD(160bit):
+ *
+ */
+#define ER_RX_SL_PACKETISER_HEADER_WORD_SIZE 20
+#define ER_RX_SL_PACKETISER_HEADER_WORD_WIDTH 160
+
+#define ERF_SC_PACKETISER_HEADER_VERSION_LBN 0
+#define ERF_SC_PACKETISER_HEADER_VERSION_WIDTH 8
+#define ERF_SC_PACKETISER_HEADER_VERSION_VALUE 2
+#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_LBN 8
+#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_WIDTH 8
+#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR 0
+#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_CT 1
+#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_OR 2
+#define ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_LBN 16
+#define ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_WIDTH 8
+#define ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT 0x4
+#define ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET_LBN 24
+#define ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET_WIDTH 8
+#define ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET_DEFAULT 0x14
+#define ERF_SC_PACKETISER_HEADER_INDEX_LBN 32
+#define ERF_SC_PACKETISER_HEADER_INDEX_WIDTH 16
+#define ERF_SC_PACKETISER_HEADER_COUNT_LBN 48
+#define ERF_SC_PACKETISER_HEADER_COUNT_WIDTH 16
+#define ERF_SC_PACKETISER_HEADER_RESERVED_0_LBN 64
+#define ERF_SC_PACKETISER_HEADER_RESERVED_0_WIDTH 32
+#define ERF_SC_PACKETISER_HEADER_RESERVED_1_LBN 96
+#define ERF_SC_PACKETISER_HEADER_RESERVED_1_WIDTH 32
+#define ERF_SC_PACKETISER_HEADER_RESERVED_2_LBN 128
+#define ERF_SC_PACKETISER_HEADER_RESERVED_2_WIDTH 32
+
+
+/*------------------------------------------------------------*/
+/*
+ * ER_RX_SL_PACKETISER_PAYLOAD_WORD(128bit):
+ *
+ */
+#define ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE 16
+#define ER_RX_SL_PACKETISER_PAYLOAD_WORD_WIDTH 128
+
+#define ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_LBN 0
+#define ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_WIDTH 24
+#define ERF_SC_PACKETISER_PAYLOAD_RESERVED_LBN 24
+#define ERF_SC_PACKETISER_PAYLOAD_RESERVED_WIDTH 8
+#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_OFST 4
+#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_SIZE 6
+#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LBN 32
+#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_WIDTH 48
+#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_OFST 10
+#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_SIZE 6
+#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LBN 80
+#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_WIDTH 48
+
+
+#endif /* EFX_MAE_COUNTER_FORMAT_H */
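The _LBN/_WIDTH pairs above give the bit position and width of each field inside the packetiser header (160 bits) and payload (128 bits) words. Below is a generic extraction sketch, assuming a little-endian byte layout and fields no wider than 56 bits (which covers every field defined here); the real driver uses its own bitfield accessors, so this helper is purely illustrative.

    /* Illustration only: pull one LBN/WIDTH field out of a packetiser word. */
    #include <stdint.h>
    #include <string.h>

    static uint64_t packetiser_field(const uint8_t *buf, unsigned int lbn,
                                     unsigned int width)
    {
            unsigned int shift = lbn % 8;
            unsigned int nbytes = (shift + width + 7) / 8;  /* <= 8 for width <= 56 */
            uint64_t v = 0;

            memcpy(&v, buf + lbn / 8, nbytes);              /* little-endian host assumed */
            return (v >> shift) & ((1ULL << width) - 1);
    }

    /* e.g. the per-packet counter index:
     *      packetiser_field(payload,
     *                       ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_LBN,
     *                       ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_WIDTH);
     */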
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 1f18e9dc62e8..7e35fec9da35 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -221,15 +221,32 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
+#define MCDI_SET_WORD(_buf, _field, _value) do { \
+ BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2); \
+ BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1); \
+ *(__force __le16 *)MCDI_PTR(_buf, _field) = cpu_to_le16(_value);\
+ } while (0)
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+/* Write a 16-bit field defined in the protocol as being big-endian. */
+#define MCDI_STRUCT_SET_WORD_BE(_buf, _field, _value) do { \
+ BUILD_BUG_ON(_field ## _LEN != 2); \
+ BUILD_BUG_ON(_field ## _OFST & 1); \
+ *(__force __be16 *)MCDI_STRUCT_PTR(_buf, _field) = (_value); \
+ } while (0)
#define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+/* Write a 32-bit field defined in the protocol as being big-endian. */
+#define MCDI_STRUCT_SET_DWORD_BE(_buf, _field, _value) do { \
+ BUILD_BUG_ON(_field ## _LEN != 4); \
+ BUILD_BUG_ON(_field ## _OFST & 3); \
+ *(__force __be32 *)MCDI_STRUCT_PTR(_buf, _field) = (_value); \
+ } while (0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2e9ba0cfe848..3b49e216768b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -56,7 +56,8 @@
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV 0
#define EFX_EXTRA_CHANNEL_PTP 1
-#define EFX_MAX_EXTRA_CHANNELS 2U
+#define EFX_EXTRA_CHANNEL_TC 2
+#define EFX_MAX_EXTRA_CHANNELS 3U
/* Checksum generation is a per-queue option in hardware, so each
* queue visible to the networking core is backed by two hardware TX
@@ -363,8 +364,12 @@ struct efx_rx_page_state {
* @refill_enabled: Enable refill whenever fill level is low
* @flush_pending: Set when a RX flush is pending. Has the same lifetime as
* @rxq_flush_pending.
+ * @grant_credits: Posted RX descriptors need to be granted to the MAE with
+ * %MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS. Used only by
+ * %EFX_EXTRA_CHANNEL_TC, and only supported on EF100.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @granted_count: Number of buffers granted to the MAE (<= @notified_count).
* @removed_count: Number of buffers removed from the receive queue.
* @scatter_n: Used by NIC specific receive code.
* @scatter_len: Used by NIC specific receive code.
@@ -385,6 +390,7 @@ struct efx_rx_page_state {
* refill was triggered.
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @grant_work: workitem used to grant credits to the MAE if @grant_credits is set
* @xdp_rxq_info: XDP specific RX queue information.
* @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
@@ -396,9 +402,11 @@ struct efx_rx_queue {
unsigned int ptr_mask;
bool refill_enabled;
bool flush_pending;
+ bool grant_credits;
unsigned int added_count;
unsigned int notified_count;
+ unsigned int granted_count;
unsigned int removed_count;
unsigned int scatter_n;
unsigned int scatter_len;
@@ -416,6 +424,7 @@ struct efx_rx_queue {
unsigned int recycle_count;
struct timer_list slow_fill;
unsigned int slow_fill_count;
+ struct work_struct grant_work;
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
struct xdp_rxq_info xdp_rxq_info;
@@ -577,12 +586,15 @@ struct efx_msi_context {
* struct efx_channel_type - distinguishes traffic and extra channels
* @handle_no_channel: Handle failure to allocate an extra channel
* @pre_probe: Set up extra state prior to initialisation
+ * @start: called early in efx_start_channels()
+ * @stop: called early in efx_stop_channels()
* @post_remove: Tear down extra state after finalisation, if allocated.
* May be called on channels that have not been probed.
* @get_name: Generate the channel's name (used for its IRQ handler)
* @copy: Copy the channel state prior to reallocation. May be %NULL if
* reallocation is not supported.
* @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @receive_raw: Handle an RX buffer ready to be passed to __efx_rx_packet()
* @want_txqs: Determine whether this channel should have TX queues
* created. If %NULL, TX queues are not created.
* @keep_eventq: Flag for whether event queue should be kept initialised
@@ -593,10 +605,13 @@ struct efx_msi_context {
struct efx_channel_type {
void (*handle_no_channel)(struct efx_nic *);
int (*pre_probe)(struct efx_channel *);
+ int (*start)(struct efx_channel *);
+ void (*stop)(struct efx_channel *);
void (*post_remove)(struct efx_channel *);
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*receive_raw)(struct efx_rx_queue *, u32);
bool (*want_txqs)(struct efx_channel *);
bool keep_eventq;
bool want_pio;
@@ -855,7 +870,6 @@ enum efx_xdp_tx_queues_mode {
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irqs_hooked: Channel interrupts are hooked
- * @log_tc_errs: Error logging for TC filter insertion is enabled
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
@@ -1018,7 +1032,6 @@ struct efx_nic {
unsigned int timer_max_ns;
bool irq_rx_adaptive;
bool irqs_hooked;
- bool log_tc_errs;
unsigned int irq_mod_step_us;
unsigned int irq_rx_moderation_us;
u32 msg_enable;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index eaef4a15008a..9f07e1ba7780 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -351,7 +351,7 @@ struct efx_ptp_data {
void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
};
-static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
static int efx_phc_settime(struct ptp_clock_info *ptp,
@@ -1508,7 +1508,7 @@ static const struct ptp_clock_info efx_phc_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 1,
- .adjfreq = efx_phc_adjfreq,
+ .adjfine = efx_phc_adjfine,
.adjtime = efx_phc_adjtime,
.gettime64 = efx_phc_gettime,
.settime64 = efx_phc_settime,
@@ -2137,11 +2137,12 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
ptp->ts_corrections.general_rx);
}
-static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int efx_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
+ s32 delta = scaled_ppm_to_ppb(scaled_ppm);
struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
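The switch from .adjfreq to .adjfine changes the input units: scaled_ppm carries a 16-bit fractional part, so +65536 means +1 ppm, i.e. +1000 ppb. A minimal sketch of the conversion the driver now relies on (an equivalent open-coded form for illustration, not the kernel's scaled_ppm_to_ppb() itself):

	#include <linux/math64.h>

	static inline s32 example_scaled_ppm_to_ppb(long scaled_ppm)
	{
		/* ppb = scaled_ppm * 1000 / 2^16; e.g. 65536 -> 1000, -32768 -> -500 */
		return (s32)div_s64((s64)scaled_ppm * 1000, 1 << 16);
	}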
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 9220afeddee8..d2f35ee15eff 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -229,6 +229,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
/* Initialise ptr fields */
rx_queue->added_count = 0;
rx_queue->notified_count = 0;
+ rx_queue->granted_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
efx_init_rx_recycle_ring(rx_queue);
@@ -281,6 +282,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
del_timer_sync(&rx_queue->slow_fill);
+ if (rx_queue->grant_credits)
+ flush_work(&rx_queue->grant_work);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index 1fd396b00bfb..e4b294b8e9ac 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -1178,7 +1178,7 @@ static ssize_t mcdi_logging_show(struct device *dev,
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+ return sysfs_emit(buf, "%d\n", mcdi->logging_enabled);
}
static ssize_t mcdi_logging_store(struct device *dev,
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 7c46752e6eae..38e666561bcd 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -347,7 +347,7 @@ struct efx_ptp_data {
void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
};
-static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
static int efx_phc_settime(struct ptp_clock_info *ptp,
@@ -1429,7 +1429,7 @@ static const struct ptp_clock_info efx_phc_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 1,
- .adjfreq = efx_phc_adjfreq,
+ .adjfine = efx_phc_adjfine,
.adjtime = efx_phc_adjtime,
.gettime64 = efx_phc_gettime,
.settime64 = efx_phc_settime,
@@ -2044,11 +2044,12 @@ void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
ptp->ts_corrections.general_rx);
}
-static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int efx_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
+ s32 delta = scaled_ppm_to_ppb(scaled_ppm);
struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 3478860d4023..deeaab9ee761 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -77,6 +77,8 @@ static void efx_tc_free_action_set(struct efx_nic *efx,
*/
list_del(&act->list);
}
+ if (act->count)
+ efx_tc_flower_put_counter_index(efx, act->count);
kfree(act);
}
@@ -124,50 +126,187 @@ static void efx_tc_flow_free(void *ptr, void *arg)
kfree(rule);
}
+/* Boilerplate for the simple 'copy a field' cases */
+#define _MAP_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_##_name)) { \
+ struct flow_match_##_type fm; \
+ \
+ flow_rule_match_##_tcget(rule, &fm); \
+ match->value._field = fm.key->_tcfield; \
+ match->mask._field = fm.mask->_tcfield; \
+}
+#define MAP_KEY_AND_MASK(_name, _type, _tcfield, _field) \
+ _MAP_KEY_AND_MASK(_name, _type, _type, _tcfield, _field)
+#define MAP_ENC_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \
+ _MAP_KEY_AND_MASK(ENC_##_name, _type, _tcget, _tcfield, _field)
+
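For illustration, MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto) used below expands to roughly the open-coded block it replaces (a sketch of the expansion, not additional patch content):

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic fm;

		flow_rule_match_basic(rule, &fm);
		match->value.eth_proto = fm.key->n_proto;
		match->mask.eth_proto = fm.mask->n_proto;
	}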
static int efx_tc_flower_parse_match(struct efx_nic *efx,
struct flow_rule *rule,
struct efx_tc_match *match,
struct netlink_ext_ack *extack)
{
struct flow_dissector *dissector = rule->match.dissector;
+ unsigned char ipv = 0;
+ /* Owing to internal TC infelicities, the IPV6_ADDRS key might be set
+ * even on IPv4 filters; so rather than relying on dissector->used_keys
+ * we check the addr_type in the CONTROL key. If we don't find it (or
+ * it's masked, which should never happen), we treat both IPV4_ADDRS
+ * and IPV6_ADDRS as absent.
+ */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control fm;
flow_rule_match_control(rule, &fm);
+ if (IS_ALL_ONES(fm.mask->addr_type))
+ switch (fm.key->addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ ipv = 4;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ ipv = 6;
+ break;
+ default:
+ break;
+ }
- if (fm.mask->flags) {
- efx_tc_err(efx, "Unsupported match on control.flags %#x\n",
- fm.mask->flags);
- NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags");
+ if (fm.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ match->value.ip_frag = fm.key->flags & FLOW_DIS_IS_FRAGMENT;
+ match->mask.ip_frag = true;
+ }
+ if (fm.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG;
+ match->mask.ip_firstfrag = true;
+ }
+ if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
+ fm.mask->flags);
return -EOPNOTSUPP;
}
}
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC))) {
- efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys);
- NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered");
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
+ BIT(FLOW_DISSECTOR_KEY_IP))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_match_basic fm;
+ MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto);
+ /* Make sure we're IP if any L3/L4 keys used. */
+ if (!IS_ALL_ONES(match->mask.eth_proto) ||
+ !(match->value.eth_proto == htons(ETH_P_IP) ||
+ match->value.eth_proto == htons(ETH_P_IPV6)))
+ if (dissector->used_keys &
+ (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_TCP))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "L3/L4 flower keys %#x require protocol ipv[46]",
+ dissector->used_keys);
+ return -EINVAL;
+ }
- flow_rule_match_basic(rule, &fm);
- if (fm.mask->n_proto) {
- EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n");
- return -EOPNOTSUPP;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan fm;
+
+ flow_rule_match_vlan(rule, &fm);
+ if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
+ match->value.vlan_proto[0] = fm.key->vlan_tpid;
+ match->mask.vlan_proto[0] = fm.mask->vlan_tpid;
+ match->value.vlan_tci[0] = cpu_to_be16(fm.key->vlan_priority << 13 |
+ fm.key->vlan_id);
+ match->mask.vlan_tci[0] = cpu_to_be16(fm.mask->vlan_priority << 13 |
+ fm.mask->vlan_id);
}
- if (fm.mask->ip_proto) {
- EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n");
- return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan fm;
+
+ flow_rule_match_cvlan(rule, &fm);
+ if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
+ match->value.vlan_proto[1] = fm.key->vlan_tpid;
+ match->mask.vlan_proto[1] = fm.mask->vlan_tpid;
+ match->value.vlan_tci[1] = cpu_to_be16(fm.key->vlan_priority << 13 |
+ fm.key->vlan_id);
+ match->mask.vlan_tci[1] = cpu_to_be16(fm.mask->vlan_priority << 13 |
+ fm.mask->vlan_id);
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs fm;
+
+ flow_rule_match_eth_addrs(rule, &fm);
+ ether_addr_copy(match->value.eth_saddr, fm.key->src);
+ ether_addr_copy(match->value.eth_daddr, fm.key->dst);
+ ether_addr_copy(match->mask.eth_saddr, fm.mask->src);
+ ether_addr_copy(match->mask.eth_daddr, fm.mask->dst);
+ }
+
+ MAP_KEY_AND_MASK(BASIC, basic, ip_proto, ip_proto);
+ /* Make sure we're TCP/UDP if any L4 keys used. */
+ if ((match->value.ip_proto != IPPROTO_UDP &&
+ match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto))
+ if (dissector->used_keys &
+ (BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_TCP))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "L4 flower keys %#x require ipproto udp or tcp",
+ dissector->used_keys);
+ return -EINVAL;
+ }
+ MAP_KEY_AND_MASK(IP, ip, tos, ip_tos);
+ MAP_KEY_AND_MASK(IP, ip, ttl, ip_ttl);
+ if (ipv == 4) {
+ MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, src, src_ip);
+ MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, dst, dst_ip);
+ }
+#ifdef CONFIG_IPV6
+ else if (ipv == 6) {
+ MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, src, src_ip6);
+ MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, dst, dst_ip6);
+ }
+#endif
+ MAP_KEY_AND_MASK(PORTS, ports, src, l4_sport);
+ MAP_KEY_AND_MASK(PORTS, ports, dst, l4_dport);
+ MAP_KEY_AND_MASK(TCP, tcp, flags, tcp_flags);
+
return 0;
}
+/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */
+enum efx_tc_action_order {
+ EFX_TC_AO_COUNT,
+ EFX_TC_AO_DELIVER
+};
+/* Determine whether we can add @new action without violating order */
+static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
+ enum efx_tc_action_order new)
+{
+ switch (new) {
+ case EFX_TC_AO_COUNT:
+ if (act->count)
+ return false;
+ fallthrough;
+ case EFX_TC_AO_DELIVER:
+ return !act->deliver;
+ default:
+ /* Bad caller. Whatever they wanted to do, say they can't. */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+}
+
static int efx_tc_flower_replace(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc,
@@ -200,13 +339,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
if (efv != from_efv) {
/* can't happen */
- efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n",
- netdev_name(net_dev), efv ? "non-" : "",
- from_efv ? "non-" : "");
- if (efv)
- NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)");
- else
- NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)");
+ NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)",
+ netdev_name(net_dev), efv ? "non-" : "",
+ from_efv ? "non-" : "");
return -EINVAL;
}
@@ -214,7 +349,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
memset(&match, 0, sizeof(match));
rc = efx_tc_flower_external_mport(efx, from_efv);
if (rc < 0) {
- EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port");
return rc;
}
match.value.ingress_port = rc;
@@ -224,7 +359,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
return rc;
if (tc->common.chain_index) {
- EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
+ NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
return -EOPNOTSUPP;
}
match.mask.recirc_id = 0xff;
@@ -261,16 +396,57 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
if (!act) {
/* more actions after a non-pipe action */
- EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action");
+ NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action");
rc = -EINVAL;
goto release;
}
+ if ((fa->id == FLOW_ACTION_REDIRECT ||
+ fa->id == FLOW_ACTION_MIRRED ||
+ fa->id == FLOW_ACTION_DROP) && fa->hw_stats) {
+ struct efx_tc_counter_index *ctr;
+
+ /* Currently the only actions that want stats are
+ * mirred and gact (ok, shot, trap, goto-chain), which
+ * means we want stats just before delivery. Also,
+ * note that tunnel_key set shouldn't change the length
+ * — it's only the subsequent mirred that does that,
+ * and the stats are taken _before_ the mirred action
+ * happens.
+ */
+ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
+ /* All supported actions that count either steal
+ * (gact shot, mirred redirect) or clone act
+ * (mirred mirror), so we should never get two
+ * count actions on one action_set.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Count-action conflict (can't happen)");
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+
+ if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "hw_stats_type %u not supported (only 'delayed')",
+ fa->hw_stats);
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+
+ ctr = efx_tc_flower_get_counter_index(efx, tc->cookie,
+ EFX_TC_COUNTER_TYPE_AR);
+ if (IS_ERR(ctr)) {
+ rc = PTR_ERR(ctr);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
+ goto release;
+ }
+ act->count = ctr;
+ }
+
switch (fa->id) {
case FLOW_ACTION_DROP:
rc = efx_mae_alloc_action_set(efx, act);
if (rc) {
- EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)");
goto release;
}
list_add_tail(&act->list, &rule->acts.list);
@@ -279,22 +455,30 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_MIRRED:
save = *act;
+
+ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) {
+ /* can't happen */
+ rc = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Deliver action violates action order (can't happen)");
+ goto release;
+ }
+
to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
if (IS_ERR(to_efv)) {
- EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch");
+ NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch");
rc = PTR_ERR(to_efv);
goto release;
}
rc = efx_tc_flower_external_mport(efx, to_efv);
if (rc < 0) {
- EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port");
goto release;
}
act->dest_mport = rc;
act->deliver = 1;
rc = efx_mae_alloc_action_set(efx, act);
if (rc) {
- EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)");
goto release;
}
list_add_tail(&act->list, &rule->acts.list);
@@ -302,6 +486,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
if (fa->id == FLOW_ACTION_REDIRECT)
break; /* end of the line */
/* Mirror, so continue on with saved act */
+ save.count = NULL;
act = kzalloc(sizeof(*act), GFP_USER);
if (!act) {
rc = -ENOMEM;
@@ -310,9 +495,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
*act = save;
break;
default:
- efx_tc_err(efx, "Unhandled action %u\n", fa->id);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
+ fa->id);
rc = -EOPNOTSUPP;
- NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
goto release;
}
}
@@ -334,7 +519,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
act->deliver = 1;
rc = efx_mae_alloc_action_set(efx, act);
if (rc) {
- EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)");
goto release;
}
list_add_tail(&act->list, &rule->acts.list);
@@ -349,13 +534,13 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
if (rc) {
- EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw");
goto release;
}
rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
rule->acts.fw_id, &rule->fw_id);
if (rc) {
- EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
goto release_acts;
}
return 0;
@@ -410,6 +595,42 @@ static int efx_tc_flower_destroy(struct efx_nic *efx,
return 0;
}
+static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_counter_index *ctr;
+ struct efx_tc_counter *cnt;
+ u64 packets, bytes;
+
+ ctr = efx_tc_flower_find_counter_index(efx, tc->cookie);
+ if (!ctr) {
+ /* See comment in efx_tc_flower_destroy() */
+ if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Filter %lx not found for stats\n",
+ tc->cookie);
+ NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
+ return -ENOENT;
+ }
+ if (WARN_ON(!ctr->cnt)) /* can't happen */
+ return -EIO;
+ cnt = ctr->cnt;
+
+ spin_lock_bh(&cnt->lock);
+ /* Report only new pkts/bytes since last time TC asked */
+ packets = cnt->packets;
+ bytes = cnt->bytes;
+ flow_stats_update(&tc->stats, bytes - cnt->old_bytes,
+ packets - cnt->old_packets, 0, cnt->touched,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ cnt->old_packets = packets;
+ cnt->old_bytes = bytes;
+ spin_unlock_bh(&cnt->lock);
+ return 0;
+}
+
int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
struct flow_cls_offload *tc, struct efx_rep *efv)
{
@@ -426,6 +647,9 @@ int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
case FLOW_CLS_DESTROY:
rc = efx_tc_flower_destroy(efx, net_dev, tc);
break;
+ case FLOW_CLS_STATS:
+ rc = efx_tc_flower_stats(efx, net_dev, tc);
+ break;
default:
rc = -EOPNOTSUPP;
break;
@@ -641,6 +865,10 @@ int efx_init_struct_tc(struct efx_nic *efx)
INIT_LIST_HEAD(&efx->tc->block_list);
mutex_init(&efx->tc->mutex);
+ init_waitqueue_head(&efx->tc->flush_wq);
+ rc = efx_tc_init_counters(efx);
+ if (rc < 0)
+ goto fail_counters;
rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
if (rc < 0)
goto fail_match_action_ht;
@@ -650,8 +878,11 @@ int efx_init_struct_tc(struct efx_nic *efx)
efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
return 0;
fail_match_action_ht:
+ efx_tc_destroy_counters(efx);
+fail_counters:
mutex_destroy(&efx->tc->mutex);
kfree(efx->tc->caps);
fail_alloc_caps:
@@ -672,6 +903,7 @@ void efx_fini_struct_tc(struct efx_nic *efx)
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
efx);
+ efx_tc_fini_counters(efx);
mutex_unlock(&efx->tc->mutex);
mutex_destroy(&efx->tc->mutex);
kfree(efx->tc->caps);
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 196fd74ed973..418ce8c13a06 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -14,27 +14,13 @@
#include <net/flow_offload.h>
#include <linux/rhashtable.h>
#include "net_driver.h"
+#include "tc_counters.h"
-/* Error reporting: convenience macros. For indicating why a given filter
- * insertion is not supported; errors in internal operation or in the
- * hardware should be netif_err()s instead.
- */
-/* Used when error message is constant. */
-#define EFX_TC_ERR_MSG(efx, extack, message) do { \
- NL_SET_ERR_MSG_MOD(extack, message); \
- if (efx->log_tc_errs) \
- netif_info(efx, drv, efx->net_dev, "%s\n", message); \
-} while (0)
-/* Used when error message is not constant; caller should also supply a
- * constant extack message with NL_SET_ERR_MSG_MOD().
- */
-#define efx_tc_err(efx, fmt, args...) do { \
-if (efx->log_tc_errs) \
- netif_info(efx, drv, efx->net_dev, fmt, ##args);\
-} while (0)
+#define IS_ALL_ONES(v) (!(typeof (v))~(v))
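A short worked example of IS_ALL_ONES (illustrative values): the typeof cast truncates the complement to the field's own width before the test.

	/* __be16 mask = 0xffff: (__be16)~mask == 0x0000 -> IS_ALL_ONES() is true.
	 * __be16 mask = 0x00ff: (__be16)~mask == 0xff00 -> IS_ALL_ONES() is false.
	 */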
struct efx_tc_action_set {
u16 deliver:1;
+ struct efx_tc_counter_index *count;
u32 dest_mport;
u32 fw_id; /* index of this entry in firmware actions table */
struct list_head list;
@@ -44,6 +30,20 @@ struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
u8 recirc_id;
+ /* L2 (inner when encap) */
+ __be16 eth_proto;
+ __be16 vlan_tci[2], vlan_proto[2];
+ u8 eth_saddr[ETH_ALEN], eth_daddr[ETH_ALEN];
+ /* L3 (when IP) */
+ u8 ip_proto, ip_tos, ip_ttl;
+ __be32 src_ip, dst_ip;
+#ifdef CONFIG_IPV6
+ struct in6_addr src_ip6, dst_ip6;
+#endif
+ bool ip_frag, ip_firstfrag;
+ /* L4 */
+ __be16 l4_sport, l4_dport; /* Ports (UDP, TCP) */
+ __be16 tcp_flags;
};
struct efx_tc_match {
@@ -76,11 +76,19 @@ enum efx_tc_rule_prios {
* @caps: MAE capabilities reported by MCDI
* @block_list: List of &struct efx_tc_block_binding
* @mutex: Used to serialise operations on TC hashtables
+ * @counter_ht: Hashtable of TC counters (FW IDs and counter values)
+ * @counter_id_ht: Hashtable mapping TC counter cookies to counters
* @match_action_ht: Hashtable of TC match-action rules
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
* @reps_mport_vport_id: vport_id for representor RX filters
+ * @flush_counters: counters have been stopped, waiting for drain
+ * @flush_gen: final generation count per type array as reported by
+ * MC_CMD_MAE_COUNTERS_STREAM_STOP
+ * @seen_gen: most recent generation count per type as seen by efx_tc_rx()
+ * @flush_wq: wait queue used by efx_mae_stop_counters() to wait for
+ * MAE counters RXQ to finish draining
* @dflt: Match-action rules for default switching; at priority
* %EFX_TC_PRIO_DFLT. Named by *ingress* port
* @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
@@ -91,9 +99,15 @@ struct efx_tc_state {
struct mae_caps *caps;
struct list_head block_list;
struct mutex mutex;
+ struct rhashtable counter_ht;
+ struct rhashtable counter_id_ht;
struct rhashtable match_action_ht;
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
+ bool flush_counters;
+ u32 flush_gen[EFX_TC_COUNTER_TYPE_MAX];
+ u32 seen_gen[EFX_TC_COUNTER_TYPE_MAX];
+ wait_queue_head_t flush_wq;
struct {
struct efx_tc_flow_rule pf;
struct efx_tc_flow_rule wire;
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
new file mode 100644
index 000000000000..d1a91d54c6bb
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc_counters.h"
+#include "mae_counter_format.h"
+#include "mae.h"
+#include "rx_common.h"
+
+/* Counter-management hashtables */
+
+static const struct rhashtable_params efx_tc_counter_id_ht_params = {
+ .key_len = offsetof(struct efx_tc_counter_index, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_counter_index, linkage),
+};
+
+static const struct rhashtable_params efx_tc_counter_ht_params = {
+ .key_len = offsetof(struct efx_tc_counter, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_counter, linkage),
+};
+
+static void efx_tc_counter_free(void *ptr, void *__unused)
+{
+ struct efx_tc_counter *cnt = ptr;
+
+ kfree(cnt);
+}
+
+static void efx_tc_counter_id_free(void *ptr, void *__unused)
+{
+ struct efx_tc_counter_index *ctr = ptr;
+
+ WARN_ON(refcount_read(&ctr->ref));
+ kfree(ctr);
+}
+
+int efx_tc_init_counters(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = rhashtable_init(&efx->tc->counter_id_ht, &efx_tc_counter_id_ht_params);
+ if (rc < 0)
+ goto fail_counter_id_ht;
+ rc = rhashtable_init(&efx->tc->counter_ht, &efx_tc_counter_ht_params);
+ if (rc < 0)
+ goto fail_counter_ht;
+ return 0;
+fail_counter_ht:
+ rhashtable_destroy(&efx->tc->counter_id_ht);
+fail_counter_id_ht:
+ return rc;
+}
+
+/* Only call this in init failure teardown.
+ * Normal exit should fini instead as there may be entries in the table.
+ */
+void efx_tc_destroy_counters(struct efx_nic *efx)
+{
+ rhashtable_destroy(&efx->tc->counter_ht);
+ rhashtable_destroy(&efx->tc->counter_id_ht);
+}
+
+void efx_tc_fini_counters(struct efx_nic *efx)
+{
+ rhashtable_free_and_destroy(&efx->tc->counter_id_ht, efx_tc_counter_id_free, NULL);
+ rhashtable_free_and_destroy(&efx->tc->counter_ht, efx_tc_counter_free, NULL);
+}
+
+/* Counter allocation */
+
+static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
+ int type)
+{
+ struct efx_tc_counter *cnt;
+ int rc, rc2;
+
+ cnt = kzalloc(sizeof(*cnt), GFP_USER);
+ if (!cnt)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&cnt->lock);
+ cnt->touched = jiffies;
+ cnt->type = type;
+
+ rc = efx_mae_allocate_counter(efx, cnt);
+ if (rc)
+ goto fail1;
+ rc = rhashtable_insert_fast(&efx->tc->counter_ht, &cnt->linkage,
+ efx_tc_counter_ht_params);
+ if (rc)
+ goto fail2;
+ return cnt;
+fail2:
+ /* If we get here, it implies that we couldn't insert into the table,
+ * which in turn probably means that the fw_id was already taken.
+ * In that case, it's unclear whether we really 'own' the fw_id; but
+ * the firmware seemed to think we did, so it's proper to free it.
+ */
+ rc2 = efx_mae_free_counter(efx, cnt);
+ if (rc2)
+ netif_warn(efx, hw, efx->net_dev,
+ "Failed to free MAE counter %u, rc %d\n",
+ cnt->fw_id, rc2);
+fail1:
+ kfree(cnt);
+ return ERR_PTR(rc > 0 ? -EIO : rc);
+}
+
+static void efx_tc_flower_release_counter(struct efx_nic *efx,
+ struct efx_tc_counter *cnt)
+{
+ int rc;
+
+ rhashtable_remove_fast(&efx->tc->counter_ht, &cnt->linkage,
+ efx_tc_counter_ht_params);
+ rc = efx_mae_free_counter(efx, cnt);
+ if (rc)
+ netif_warn(efx, hw, efx->net_dev,
+ "Failed to free MAE counter %u, rc %d\n",
+ cnt->fw_id, rc);
+ /* This doesn't protect counter updates coming in arbitrarily long
+ * after we deleted the counter. The RCU just ensures that we won't
+ * free the counter while another thread has a pointer to it.
+ * Ensuring we don't update the wrong counter if the ID gets re-used
+ * is handled by the generation count.
+ */
+ synchronize_rcu();
+ EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));
+ kfree(cnt);
+}
+
+static struct efx_tc_counter *efx_tc_flower_find_counter_by_fw_id(
+ struct efx_nic *efx, int type, u32 fw_id)
+{
+ struct efx_tc_counter key = {};
+
+ key.fw_id = fw_id;
+ key.type = type;
+
+ return rhashtable_lookup_fast(&efx->tc->counter_ht, &key,
+ efx_tc_counter_ht_params);
+}
+
+/* TC cookie to counter mapping */
+
+void efx_tc_flower_put_counter_index(struct efx_nic *efx,
+ struct efx_tc_counter_index *ctr)
+{
+ if (!refcount_dec_and_test(&ctr->ref))
+ return; /* still in use */
+ rhashtable_remove_fast(&efx->tc->counter_id_ht, &ctr->linkage,
+ efx_tc_counter_id_ht_params);
+ efx_tc_flower_release_counter(efx, ctr->cnt);
+ kfree(ctr);
+}
+
+struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
+ struct efx_nic *efx, unsigned long cookie,
+ enum efx_tc_counter_type type)
+{
+ struct efx_tc_counter_index *ctr, *old;
+ struct efx_tc_counter *cnt;
+
+ ctr = kzalloc(sizeof(*ctr), GFP_USER);
+ if (!ctr)
+ return ERR_PTR(-ENOMEM);
+ ctr->cookie = cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->counter_id_ht,
+ &ctr->linkage,
+ efx_tc_counter_id_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(ctr);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found */
+ ctr = old;
+ } else {
+ cnt = efx_tc_flower_allocate_counter(efx, type);
+ if (IS_ERR(cnt)) {
+ rhashtable_remove_fast(&efx->tc->counter_id_ht,
+ &ctr->linkage,
+ efx_tc_counter_id_ht_params);
+ kfree(ctr);
+ return (void *)cnt; /* it's an ERR_PTR */
+ }
+ ctr->cnt = cnt;
+ refcount_set(&ctr->ref, 1);
+ }
+ return ctr;
+}
+
+struct efx_tc_counter_index *efx_tc_flower_find_counter_index(
+ struct efx_nic *efx, unsigned long cookie)
+{
+ struct efx_tc_counter_index key = {};
+
+ key.cookie = cookie;
+ return rhashtable_lookup_fast(&efx->tc->counter_id_ht, &key,
+ efx_tc_counter_id_ht_params);
+}
+
+/* TC Channel. Counter updates are delivered on this channel's RXQ. */
+
+static void efx_tc_handle_no_channel(struct efx_nic *efx)
+{
+ netif_warn(efx, drv, efx->net_dev,
+ "MAE counters require MSI-X and 1 additional interrupt vector.\n");
+}
+
+static int efx_tc_probe_channel(struct efx_channel *channel)
+{
+ struct efx_rx_queue *rx_queue = &channel->rx_queue;
+
+ channel->irq_moderation_us = 0;
+ rx_queue->core_index = 0;
+
+ INIT_WORK(&rx_queue->grant_work, efx_mae_counters_grant_credits);
+
+ return 0;
+}
+
+static int efx_tc_start_channel(struct efx_channel *channel)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = channel->efx;
+
+ return efx_mae_start_counters(efx, rx_queue);
+}
+
+static void efx_tc_stop_channel(struct efx_channel *channel)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ rc = efx_mae_stop_counters(efx, rx_queue);
+ if (rc)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to stop MAE counters streaming, rc=%d.\n",
+ rc);
+ rx_queue->grant_credits = false;
+ flush_work(&rx_queue->grant_work);
+}
+
+static void efx_tc_remove_channel(struct efx_channel *channel)
+{
+}
+
+static void efx_tc_get_channel_name(struct efx_channel *channel,
+ char *buf, size_t len)
+{
+ snprintf(buf, len, "%s-mae", channel->efx->name);
+}
+
+static void efx_tc_counter_update(struct efx_nic *efx,
+ enum efx_tc_counter_type counter_type,
+ u32 counter_idx, u64 packets, u64 bytes,
+ u32 mark)
+{
+ struct efx_tc_counter *cnt;
+
+ rcu_read_lock(); /* Protect against deletion of 'cnt' */
+ cnt = efx_tc_flower_find_counter_by_fw_id(efx, counter_type, counter_idx);
+ if (!cnt) {
+ /* This can legitimately happen when a counter is removed,
+ * with updates for the counter still in-flight; however this
+ * should be an infrequent occurrence.
+ */
+ if (net_ratelimit())
+ netif_dbg(efx, drv, efx->net_dev,
+ "Got update for unwanted MAE counter %u type %u\n",
+ counter_idx, counter_type);
+ goto out;
+ }
+
+ spin_lock_bh(&cnt->lock);
+ if ((s32)mark - (s32)cnt->gen < 0) {
+ /* This counter update packet is from before the counter was
+ * allocated; thus it must be for a previous counter with
+ * the same ID that has since been freed, and it should be
+ * ignored.
+ */
+ } else {
+ /* Update latest seen generation count. This ensures that
+ * even a long-lived counter won't start getting ignored if
+ * the generation count wraps around, unless it somehow
+ * manages to go 1<<31 generations without an update.
+ */
+ cnt->gen = mark;
+ /* update counter values */
+ cnt->packets += packets;
+ cnt->bytes += bytes;
+ cnt->touched = jiffies;
+ }
+ spin_unlock_bh(&cnt->lock);
+out:
+ rcu_read_unlock();
+}
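The signed subtraction above is standard serial-number arithmetic; a small sketch of the behaviour with example values (example_gen_is_current() is a hypothetical helper mirroring the in-line check):

	/* True iff @mark is at or after @gen, tolerating 32-bit wraparound
	 * (the two may be up to 2^31 generations apart).
	 */
	static bool example_gen_is_current(u32 mark, u32 gen)
	{
		return (s32)(mark - gen) >= 0;
	}

	/* e.g. mark 0x00000002, gen 0xfffffffe -> (s32)+4 >= 0: apply update
	 *      mark 0xfffffffe, gen 0x00000002 -> (s32)-4 <  0: stale, ignore
	 */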
+
+static void efx_tc_rx_version_1(struct efx_nic *efx, const u8 *data, u32 mark)
+{
+ u16 n_counters, i;
+
+ /* Header format:
+ * + | 0 | 1 | 2 | 3 |
+ * 0 |version | reserved |
+ * 4 | seq_index | n_counters |
+ */
+
+ n_counters = le16_to_cpu(*(const __le16 *)(data + 6));
+
+ /* Counter update entry format:
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e | f |
+ * | counter_idx | packet_count | byte_count |
+ */
+ for (i = 0; i < n_counters; i++) {
+ const void *entry = data + 8 + 16 * i;
+ u64 packet_count, byte_count;
+ u32 counter_idx;
+
+ counter_idx = le32_to_cpu(*(const __le32 *)entry);
+ packet_count = le32_to_cpu(*(const __le32 *)(entry + 4)) |
+ ((u64)le16_to_cpu(*(const __le16 *)(entry + 8)) << 32);
+ byte_count = le16_to_cpu(*(const __le16 *)(entry + 10)) |
+ ((u64)le32_to_cpu(*(const __le32 *)(entry + 12)) << 16);
+ efx_tc_counter_update(efx, EFX_TC_COUNTER_TYPE_AR, counter_idx,
+ packet_count, byte_count, mark);
+ }
+}
+
+#define TCV2_HDR_PTR(pkt, field) \
+ ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_LBN & 7), \
+ (pkt) + ERF_SC_PACKETISER_HEADER_##field##_LBN / 8)
+#define TCV2_HDR_BYTE(pkt, field) \
+ ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_WIDTH != 8),\
+ *TCV2_HDR_PTR(pkt, field))
+#define TCV2_HDR_WORD(pkt, field) \
+ ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_WIDTH != 16),\
+ (void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_LBN & 15), \
+ *(__force const __le16 *)TCV2_HDR_PTR(pkt, field))
+#define TCV2_PKT_PTR(pkt, poff, i, field) \
+ ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_PAYLOAD_##field##_LBN & 7), \
+ (pkt) + ERF_SC_PACKETISER_PAYLOAD_##field##_LBN/8 + poff + \
+ i * ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE)
+
+/* Read a little-endian 48-bit field with 16-bit alignment */
+static u64 efx_tc_read48(const __le16 *field)
+{
+ u64 out = 0;
+ int i;
+
+ for (i = 0; i < 3; i++)
+ out |= (u64)le16_to_cpu(field[i]) << (i * 16);
+ return out;
+}
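A quick sanity check of the 48-bit reader (illustrative only, not part of the patch): three little-endian 16-bit words 0x0001, 0x0002, 0x0003 decode to 0x000300020001.

	static void example_read48_check(void)
	{
		const __le16 words[3] = {
			cpu_to_le16(0x0001), cpu_to_le16(0x0002), cpu_to_le16(0x0003)
		};

		WARN_ON(efx_tc_read48(words) != 0x000300020001ULL);
	}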
+
+static enum efx_tc_counter_type efx_tc_rx_version_2(struct efx_nic *efx,
+ const u8 *data, u32 mark)
+{
+ u8 payload_offset, header_offset, ident;
+ enum efx_tc_counter_type type;
+ u16 n_counters, i;
+
+ ident = TCV2_HDR_BYTE(data, IDENTIFIER);
+ switch (ident) {
+ case ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR:
+ type = EFX_TC_COUNTER_TYPE_AR;
+ break;
+ case ERF_SC_PACKETISER_HEADER_IDENTIFIER_CT:
+ type = EFX_TC_COUNTER_TYPE_CT;
+ break;
+ case ERF_SC_PACKETISER_HEADER_IDENTIFIER_OR:
+ type = EFX_TC_COUNTER_TYPE_OR;
+ break;
+ default:
+ if (net_ratelimit())
+ netif_err(efx, drv, efx->net_dev,
+ "ignored v2 MAE counter packet (bad identifier %u"
+ "), counters may be inaccurate\n", ident);
+ return EFX_TC_COUNTER_TYPE_MAX;
+ }
+ header_offset = TCV2_HDR_BYTE(data, HEADER_OFFSET);
+ /* mae_counter_format.h implies that this offset is fixed, since it
+ * carries on with SOP-based LBNs for the fields in this header
+ */
+ if (header_offset != ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT) {
+ if (net_ratelimit())
+ netif_err(efx, drv, efx->net_dev,
+ "choked on v2 MAE counter packet (bad header_offset %u"
+ "), counters may be inaccurate\n", header_offset);
+ return EFX_TC_COUNTER_TYPE_MAX;
+ }
+ payload_offset = TCV2_HDR_BYTE(data, PAYLOAD_OFFSET);
+ n_counters = le16_to_cpu(TCV2_HDR_WORD(data, COUNT));
+
+ for (i = 0; i < n_counters; i++) {
+ const void *counter_idx_p, *packet_count_p, *byte_count_p;
+ u64 packet_count, byte_count;
+ u32 counter_idx;
+
+ /* 24-bit field with 32-bit alignment */
+ counter_idx_p = TCV2_PKT_PTR(data, payload_offset, i, COUNTER_INDEX);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_WIDTH != 24);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_LBN & 31);
+ counter_idx = le32_to_cpu(*(const __le32 *)counter_idx_p) & 0xffffff;
+ /* 48-bit field with 16-bit alignment */
+ packet_count_p = TCV2_PKT_PTR(data, payload_offset, i, PACKET_COUNT);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_WIDTH != 48);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LBN & 15);
+ packet_count = efx_tc_read48((const __le16 *)packet_count_p);
+ /* 48-bit field with 16-bit alignment */
+ byte_count_p = TCV2_PKT_PTR(data, payload_offset, i, BYTE_COUNT);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_WIDTH != 48);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LBN & 15);
+ byte_count = efx_tc_read48((const __le16 *)byte_count_p);
+
+ if (type == EFX_TC_COUNTER_TYPE_CT) {
+ /* CT counters are 1-bit saturating counters to update
+ * the lastuse time in CT stats. A received CT counter
+ * should have its packet count set to 0 and only the
+ * LSB set in its byte count.
+ */
+ if (packet_count || byte_count != 1)
+ netdev_warn_once(efx->net_dev,
+ "CT counter with inconsistent state (%llu, %llu)\n",
+ packet_count, byte_count);
+ /* Do not increment the driver's byte counter */
+ byte_count = 0;
+ }
+
+ efx_tc_counter_update(efx, type, counter_idx, packet_count,
+ byte_count, mark);
+ }
+ return type;
+}
+
+/* We always swallow the packet, whether successful or not, since it's not
+ * a network packet and shouldn't ever be forwarded to the stack.
+ * @mark is the generation count for counter allocations.
+ */
+static bool efx_tc_rx(struct efx_rx_queue *rx_queue, u32 mark)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
+ channel->rx_pkt_index);
+ const u8 *data = efx_rx_buf_va(rx_buf);
+ struct efx_nic *efx = rx_queue->efx;
+ enum efx_tc_counter_type type;
+ u8 version;
+
+ /* version is always first byte of packet */
+ version = *data;
+ switch (version) {
+ case 1:
+ type = EFX_TC_COUNTER_TYPE_AR;
+ efx_tc_rx_version_1(efx, data, mark);
+ break;
+ case ERF_SC_PACKETISER_HEADER_VERSION_VALUE: // 2
+ type = efx_tc_rx_version_2(efx, data, mark);
+ break;
+ default:
+ if (net_ratelimit())
+ netif_err(efx, drv, efx->net_dev,
+ "choked on MAE counter packet (bad version %u"
+ "); counters may be inaccurate\n",
+ version);
+ goto out;
+ }
+
+ if (type < EFX_TC_COUNTER_TYPE_MAX) {
+ /* Update seen_gen unconditionally, to avoid a missed wakeup if
+ * we race with efx_mae_stop_counters().
+ */
+ efx->tc->seen_gen[type] = mark;
+ if (efx->tc->flush_counters &&
+ (s32)(efx->tc->flush_gen[type] - mark) <= 0)
+ wake_up(&efx->tc->flush_wq);
+ }
+out:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->rx_pkt_n_frags = 0;
+ return true;
+}
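The wake_up() above pairs with a waiter in efx_mae_stop_counters() (not shown in this patch); a hedged sketch of what such a wait could look like, reusing the same generation-count comparison:

	/* Hypothetical waiter: block until the counters RXQ has seen the final
	 * generation count reported by MC_CMD_MAE_COUNTERS_STREAM_STOP.
	 */
	wait_event_timeout(efx->tc->flush_wq,
			   (s32)(efx->tc->seen_gen[type] - efx->tc->flush_gen[type]) >= 0,
			   msecs_to_jiffies(1000));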
+
+const struct efx_channel_type efx_tc_channel_type = {
+ .handle_no_channel = efx_tc_handle_no_channel,
+ .pre_probe = efx_tc_probe_channel,
+ .start = efx_tc_start_channel,
+ .stop = efx_tc_stop_channel,
+ .post_remove = efx_tc_remove_channel,
+ .get_name = efx_tc_get_channel_name,
+ .receive_raw = efx_tc_rx,
+ .keep_eventq = true,
+};
diff --git a/drivers/net/ethernet/sfc/tc_counters.h b/drivers/net/ethernet/sfc/tc_counters.h
new file mode 100644
index 000000000000..8fc7c4bbb29c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_counters.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_COUNTERS_H
+#define EFX_TC_COUNTERS_H
+#include <linux/refcount.h>
+#include "net_driver.h"
+
+#include "mcdi_pcol.h" /* for MAE_COUNTER_TYPE_* */
+
+enum efx_tc_counter_type {
+ EFX_TC_COUNTER_TYPE_AR = MAE_COUNTER_TYPE_AR,
+ EFX_TC_COUNTER_TYPE_CT = MAE_COUNTER_TYPE_CT,
+ EFX_TC_COUNTER_TYPE_OR = MAE_COUNTER_TYPE_OR,
+ EFX_TC_COUNTER_TYPE_MAX
+};
+
+struct efx_tc_counter {
+ u32 fw_id; /* index in firmware counter table */
+ enum efx_tc_counter_type type;
+ struct rhash_head linkage; /* efx->tc->counter_ht */
+ spinlock_t lock; /* Serialises updates to counter values */
+ u32 gen; /* Generation count at which this counter is current */
+ u64 packets, bytes;
+ u64 old_packets, old_bytes; /* Values last time passed to userspace */
+ /* jiffies of the last time we saw packets increase */
+ unsigned long touched;
+};
+
+struct efx_tc_counter_index {
+ unsigned long cookie;
+ struct rhash_head linkage; /* efx->tc->counter_id_ht */
+ refcount_t ref;
+ struct efx_tc_counter *cnt;
+};
+
+/* create/destroy/teardown hashtables */
+int efx_tc_init_counters(struct efx_nic *efx);
+void efx_tc_destroy_counters(struct efx_nic *efx);
+void efx_tc_fini_counters(struct efx_nic *efx);
+
+struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
+ struct efx_nic *efx, unsigned long cookie,
+ enum efx_tc_counter_type type);
+void efx_tc_flower_put_counter_index(struct efx_nic *efx,
+ struct efx_tc_counter_index *ctr);
+struct efx_tc_counter_index *efx_tc_flower_find_counter_index(
+ struct efx_nic *efx, unsigned long cookie);
+
+extern const struct efx_channel_type efx_tc_channel_type;
+
+#endif /* EFX_TC_COUNTERS_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c5f88f7a7a04..4ed4082836a9 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -207,11 +207,11 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u8 *vaddr;
- vaddr = kmap_atomic(skb_frag_page(f));
+ vaddr = kmap_local_page(skb_frag_page(f));
efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
skb_frag_size(f), copy_buf);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
}
EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 2524c907f386..5f22a8a4d27b 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -75,20 +75,6 @@ config EPIC100
More specific information and updates are available from
<http://www.scyld.com/network/epic100.html>.
-config SMC911X
- tristate "SMSC LAN911[5678] support"
- select CRC32
- select MII
- depends on (ARM || SUPERH || COMPILE_TEST)
- help
- This is a driver for SMSC's LAN911x series of Ethernet chipsets
- including the new LAN9115, LAN9116, LAN9117, and LAN9118.
- Say Y here if you want it compiled into the kernel.
-
- This driver is also available as a module. The module will be
- called smc911x. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.rst>
-
config SMSC911X
tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
depends on HAS_IOMEM
diff --git a/drivers/net/ethernet/smsc/Makefile b/drivers/net/ethernet/smsc/Makefile
index 4105912b1629..1501fa364c13 100644
--- a/drivers/net/ethernet/smsc/Makefile
+++ b/drivers/net/ethernet/smsc/Makefile
@@ -8,5 +8,4 @@ obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o
obj-$(CONFIG_EPIC100) += epic100.o
obj-$(CONFIG_SMSC9420) += smsc9420.o
-obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
deleted file mode 100644
index 52ecfb461c41..000000000000
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ /dev/null
@@ -1,2198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * smc911x.c
- * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
- *
- * Copyright (C) 2005 Sensoria Corp
- * Derived from the unified SMC91x driver by Nicolas Pitre
- * and the smsc911x.c reference driver by SMSC
- *
- * Arguments:
- * watchdog = TX watchdog timeout
- * tx_fifo_kb = Size of TX FIFO in KB
- *
- * History:
- * 04/16/05 Dustin McIntire Initial version
- */
-static const char version[] =
- "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
-
-/* Debugging options */
-#define ENABLE_SMC_DEBUG_RX 0
-#define ENABLE_SMC_DEBUG_TX 0
-#define ENABLE_SMC_DEBUG_DMA 0
-#define ENABLE_SMC_DEBUG_PKTS 0
-#define ENABLE_SMC_DEBUG_MISC 0
-#define ENABLE_SMC_DEBUG_FUNC 0
-
-#define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0)
-#define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1)
-#define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2)
-#define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
-#define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
-#define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)
-
-#ifndef SMC_DEBUG
-#define SMC_DEBUG ( SMC_DEBUG_RX | \
- SMC_DEBUG_TX | \
- SMC_DEBUG_DMA | \
- SMC_DEBUG_PKTS | \
- SMC_DEBUG_MISC | \
- SMC_DEBUG_FUNC \
- )
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/workqueue.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <linux/dmaengine.h>
-
-#include <asm/io.h>
-
-#include "smc911x.h"
-
-/*
- * Transmit timeout, default 5 seconds.
- */
-static int watchdog = 5000;
-module_param(watchdog, int, 0400);
-MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
-
-static int tx_fifo_kb=8;
-module_param(tx_fifo_kb, int, 0400);
-MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:smc911x");
-
-/*
- * The internal workings of the driver. If you are changing anything
- * here with the SMC stuff, you should have the datasheet and know
- * what you are doing.
- */
-#define CARDNAME "smc911x"
-
-/*
- * Use power-down feature of the chip
- */
-#define POWER_DOWN 1
-
-#if SMC_DEBUG > 0
-#define DBG(n, dev, args...) \
- do { \
- if (SMC_DEBUG & (n)) \
- netdev_dbg(dev, args); \
- } while (0)
-
-#define PRINTK(dev, args...) netdev_info(dev, args)
-#else
-#define DBG(n, dev, args...) \
- while (0) { \
- netdev_dbg(dev, args); \
- }
-#define PRINTK(dev, args...) netdev_dbg(dev, args)
-#endif
-
-#if SMC_DEBUG_PKTS > 0
-static void PRINT_PKT(u_char *buf, int length)
-{
- int i;
- int remainder;
- int lines;
-
- lines = length / 16;
- remainder = length % 16;
-
- for (i = 0; i < lines ; i ++) {
- int cur;
- printk(KERN_DEBUG);
- for (cur = 0; cur < 8; cur++) {
- u_char a, b;
- a = *buf++;
- b = *buf++;
- pr_cont("%02x%02x ", a, b);
- }
- pr_cont("\n");
- }
- printk(KERN_DEBUG);
- for (i = 0; i < remainder/2 ; i++) {
- u_char a, b;
- a = *buf++;
- b = *buf++;
- pr_cont("%02x%02x ", a, b);
- }
- pr_cont("\n");
-}
-#else
-static inline void PRINT_PKT(u_char *buf, int length) { }
-#endif
-
-
-/* this enables an interrupt in the interrupt mask register */
-#define SMC_ENABLE_INT(lp, x) do { \
- unsigned int __mask; \
- __mask = SMC_GET_INT_EN((lp)); \
- __mask |= (x); \
- SMC_SET_INT_EN((lp), __mask); \
-} while (0)
-
-/* this disables an interrupt from the interrupt mask register */
-#define SMC_DISABLE_INT(lp, x) do { \
- unsigned int __mask; \
- __mask = SMC_GET_INT_EN((lp)); \
- __mask &= ~(x); \
- SMC_SET_INT_EN((lp), __mask); \
-} while (0)
-
-/*
- * this does a soft reset on the device
- */
-static void smc911x_reset(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int reg, timeout=0, resets=1, irq_cfg;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- /* Take out of PM setting first */
- if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
- /* Write to the bytetest will take out of powerdown */
- SMC_SET_BYTE_TEST(lp, 0);
- timeout=10;
- do {
- udelay(10);
- reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
- } while (--timeout && !reg);
- if (timeout == 0) {
- PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
- return;
- }
- }
-
- /* Disable all interrupts */
- spin_lock_irqsave(&lp->lock, flags);
- SMC_SET_INT_EN(lp, 0);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- while (resets--) {
- SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
- timeout=10;
- do {
- udelay(10);
- reg = SMC_GET_HW_CFG(lp);
- /* If chip indicates reset timeout then try again */
- if (reg & HW_CFG_SRST_TO_) {
- PRINTK(dev, "chip reset timeout, retrying...\n");
- resets++;
- break;
- }
- } while (--timeout && (reg & HW_CFG_SRST_));
- }
- if (timeout == 0) {
- PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
- return;
- }
-
- /* make sure EEPROM has finished loading before setting GPIO_CFG */
- timeout=1000;
- while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_))
- udelay(10);
-
- if (timeout == 0){
- PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
- return;
- }
-
- /* Initialize interrupts */
- SMC_SET_INT_EN(lp, 0);
- SMC_ACK_INT(lp, -1);
-
- /* Reset the FIFO level and flow control settings */
- SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
-//TODO: Figure out what appropriate pause time is
- SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
- SMC_SET_AFC_CFG(lp, lp->afc_cfg);
-
-
- /* Set to LED outputs */
- SMC_SET_GPIO_CFG(lp, 0x70070000);
-
- /*
- * Deassert IRQ for 1*10us for edge type interrupts
- * and drive IRQ pin push-pull
- */
- irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
-#ifdef SMC_DYNAMIC_BUS_CONFIG
- if (lp->cfg.irq_polarity)
- irq_cfg |= INT_CFG_IRQ_POL_;
-#endif
- SMC_SET_IRQ_CFG(lp, irq_cfg);
-
- /* clear anything saved */
- if (lp->pending_tx_skb != NULL) {
- dev_kfree_skb (lp->pending_tx_skb);
- lp->pending_tx_skb = NULL;
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
- }
-}
-
-/*
- * Enable Interrupts, Receive, and Transmit
- */
-static void smc911x_enable(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned mask, cfg, cr;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- spin_lock_irqsave(&lp->lock, flags);
-
- SMC_SET_MAC_ADDR(lp, dev->dev_addr);
-
- /* Enable TX */
- cfg = SMC_GET_HW_CFG(lp);
- cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
- cfg |= HW_CFG_SF_;
- SMC_SET_HW_CFG(lp, cfg);
- SMC_SET_FIFO_TDA(lp, 0xFF);
- /* Update TX stats on every 64 packets received or every 1 sec */
- SMC_SET_FIFO_TSL(lp, 64);
- SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
-
- SMC_GET_MAC_CR(lp, cr);
- cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
- SMC_SET_MAC_CR(lp, cr);
- SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
-
- /* Add 2 byte padding to start of packets */
- SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
-
- /* Turn on receiver and enable RX */
- if (cr & MAC_CR_RXEN_)
- DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");
-
- SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
-
- /* Interrupt on every received packet */
- SMC_SET_FIFO_RSA(lp, 0x01);
- SMC_SET_FIFO_RSL(lp, 0x00);
-
- /* now, enable interrupts */
- mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
- INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
- INT_EN_PHY_INT_EN_;
- if (IS_REV_A(lp->revision))
- mask|=INT_EN_RDFL_EN_;
- else {
- mask|=INT_EN_RDFO_EN_;
- }
- SMC_ENABLE_INT(lp, mask);
-
- spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-/*
- * this puts the device in an inactive state
- */
-static void smc911x_shutdown(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned cr;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);
-
- /* Disable IRQ's */
- SMC_SET_INT_EN(lp, 0);
-
- /* Turn of Rx and TX */
- spin_lock_irqsave(&lp->lock, flags);
- SMC_GET_MAC_CR(lp, cr);
- cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
- SMC_SET_MAC_CR(lp, cr);
- SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
- spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-static inline void smc911x_drop_pkt(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int fifo_count, timeout, reg;
-
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
- CARDNAME, __func__);
- fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
- if (fifo_count <= 4) {
- /* Manually dump the packet data */
- while (fifo_count--)
- SMC_GET_RX_FIFO(lp);
- } else {
- /* Fast forward through the bad packet */
- SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
- timeout=50;
- do {
- udelay(10);
- reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
- } while (--timeout && reg);
- if (timeout == 0) {
- PRINTK(dev, "timeout waiting for RX fast forward\n");
- }
- }
-}
-
-/*
- * This is the procedure to handle the receipt of a packet.
- * It should be called after checking for packet presence in
- * the RX status FIFO. It must be called with the spin lock
- * already held.
- */
-static inline void smc911x_rcv(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int pkt_len, status;
- struct sk_buff *skb;
- unsigned char *data;
-
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
- __func__);
- status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
- (status & 0x3fff0000) >> 16, status & 0xc000ffff);
- pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
- if (status & RX_STS_ES_) {
- /* Deal with a bad packet */
- dev->stats.rx_errors++;
- if (status & RX_STS_CRC_ERR_)
- dev->stats.rx_crc_errors++;
- else {
- if (status & RX_STS_LEN_ERR_)
- dev->stats.rx_length_errors++;
- if (status & RX_STS_MCAST_)
- dev->stats.multicast++;
- }
- /* Remove the bad packet data from the RX FIFO */
- smc911x_drop_pkt(dev);
- } else {
- /* Receive a valid packet */
- /* Alloc a buffer with extra room for DMA alignment */
- skb = netdev_alloc_skb(dev, pkt_len+32);
- if (unlikely(skb == NULL)) {
- PRINTK(dev, "Low memory, rcvd packet dropped.\n");
- dev->stats.rx_dropped++;
- smc911x_drop_pkt(dev);
- return;
- }
- /* Align IP header to 32 bits
- * Note that the device is configured to add a 2
- * byte padding to the packet start, so we really
-	 * want to write to the original data pointer */
- data = skb->data;
- skb_reserve(skb, 2);
- skb_put(skb,pkt_len-4);
-#ifdef SMC_USE_DMA
- {
- unsigned int fifo;
- /* Lower the FIFO threshold if possible */
- fifo = SMC_GET_FIFO_INT(lp);
- if (fifo & 0xFF) fifo--;
- DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
- fifo & 0xff);
- SMC_SET_FIFO_INT(lp, fifo);
- /* Setup RX DMA */
- SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
- lp->rxdma_active = 1;
- lp->current_rx_skb = skb;
- SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
- /* Packet processing deferred to DMA RX interrupt */
- }
-#else
- SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
- SMC_PULL_DATA(lp, data, pkt_len+2+3);
-
- DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
- PRINT_PKT(data, min(pkt_len - 4, 64U));
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len-4;
-#endif
- }
-}
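To make the length handling above concrete (the status value here is hypothetical): if the RX status word were 0x00400000, then pkt_len = (status & RX_STS_PKT_LEN_) >> 16 = 64, i.e. 64 bytes on the wire including the 4-byte FCS. The skb is therefore sized with skb_put(skb, pkt_len - 4) = 60 bytes of frame data, while the PIO branch pulls pkt_len + 2 + 3 = 69 bytes from the FIFO to cover the 2-byte RX offset padding plus rounding slack for whole 32-bit transfers.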
-
-/*
- * This is called to actually send a packet to the chip.
- */
-static void smc911x_hardware_send_pkt(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- struct sk_buff *skb;
- unsigned int cmdA, cmdB, len;
- unsigned char *buf;
-
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
- BUG_ON(lp->pending_tx_skb == NULL);
-
- skb = lp->pending_tx_skb;
- lp->pending_tx_skb = NULL;
-
-	/* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
-	/* cmdB [31:16] pkt tag [10:0] length */
-#ifdef SMC_USE_DMA
- /* 16 byte buffer alignment mode */
- buf = (char*)((u32)(skb->data) & ~0xF);
- len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
- cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
- TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
- skb->len;
-#else
- buf = (char *)((uintptr_t)skb->data & ~0x3);
- len = (skb->len + 3 + ((uintptr_t)skb->data & 3)) & ~0x3;
- cmdA = (((uintptr_t)skb->data & 0x3) << 16) |
- TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
- skb->len;
-#endif
- /* tag is packet length so we can use this in stats update later */
- cmdB = (skb->len << 16) | (skb->len & 0x7FF);
-
- DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
- len, len, buf, cmdA, cmdB);
- SMC_SET_TX_FIFO(lp, cmdA);
- SMC_SET_TX_FIFO(lp, cmdB);
-
- DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
- PRINT_PKT(buf, min(len, 64U));
-
- /* Send pkt via PIO or DMA */
-#ifdef SMC_USE_DMA
- lp->current_tx_skb = skb;
- SMC_PUSH_DATA(lp, buf, len);
- /* DMA complete IRQ will free buffer and set jiffies */
-#else
- SMC_PUSH_DATA(lp, buf, len);
- netif_trans_update(dev);
- dev_kfree_skb_irq(skb);
-#endif
- if (!lp->tx_throttle) {
- netif_wake_queue(dev);
- }
- SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
-}
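To sanity-check the alignment arithmetic in the PIO branch above, here is a small, self-contained user-space sketch; the data pointer value and frame length are made up purely for illustration, and the cmdA/cmdB field layout follows the comments above:

	#include <stdint.h>
	#include <stdio.h>

	#define TX_CMD_A_INT_FIRST_SEG_	0x00002000
	#define TX_CMD_A_INT_LAST_SEG_	0x00001000

	int main(void)
	{
		uintptr_t data = 0x1003;	/* hypothetical skb->data */
		unsigned int pktlen = 60;	/* hypothetical skb->len */

		uintptr_t buf = data & ~(uintptr_t)0x3;		  /* push from 0x1000 */
		unsigned int offset = data & 0x3;		  /* 3-byte start offset */
		unsigned int len = (pktlen + 3 + offset) & ~0x3u; /* 64 bytes pushed */
		uint32_t cmdA = (offset << 16) |
				TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
				pktlen;
		uint32_t cmdB = (pktlen << 16) | (pktlen & 0x7FF);

		printf("buf=0x%lx len=%u cmdA=0x%08x cmdB=0x%08x\n",
		       (unsigned long)buf, len, (unsigned)cmdA, (unsigned)cmdB);
		return 0;
	}

For that made-up frame this should print buf=0x1000 len=64 cmdA=0x0003303c cmdB=0x003c003c: the buffer pointer is rounded down to a word boundary, the pushed length rounded up, and the 3-byte start offset carried in cmdA bits [20:16].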
-
-/*
- * Since I am not sure if I will have enough room in the chip's ram
- * to store the packet, I call this routine which either sends it
- * now, or sets the card up to generate an interrupt when it is ready
- * for the packet.
- */
-static netdev_tx_t
-smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int free;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
- __func__);
-
- spin_lock_irqsave(&lp->lock, flags);
-
- BUG_ON(lp->pending_tx_skb != NULL);
-
- free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
- DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);
-
- /* Turn off the flow when running out of space in FIFO */
- if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
- DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
- free);
-		/* Re-enable when space for at least 1 MTU-sized packet is present */
- SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
- lp->tx_throttle = 1;
- netif_stop_queue(dev);
- }
-
- /* Drop packets when we run out of space in TX FIFO
- * Account for overhead required for:
- *
- * Tx command words 8 bytes
- * Start offset 15 bytes
- * End padding 15 bytes
- */
- if (unlikely(free < (skb->len + 8 + 15 + 15))) {
- netdev_warn(dev, "No Tx free space %d < %d\n",
- free, skb->len);
- lp->pending_tx_skb = NULL;
- dev->stats.tx_errors++;
- dev->stats.tx_dropped++;
- spin_unlock_irqrestore(&lp->lock, flags);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
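For scale, the headroom test above means a maximum-size 1514-byte Ethernet frame is only queued when at least 1514 + 8 + 15 + 15 = 1552 bytes of TX data FIFO space are free; with anything less the frame is dropped and counted rather than deferred.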
-
-#ifdef SMC_USE_DMA
- {
- /* If the DMA is already running then defer this packet Tx until
- * the DMA IRQ starts it
- */
- if (lp->txdma_active) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
- lp->pending_tx_skb = skb;
- netif_stop_queue(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- return NETDEV_TX_OK;
- } else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
- lp->txdma_active = 1;
- }
- }
-#endif
- lp->pending_tx_skb = skb;
- smc911x_hardware_send_pkt(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-/*
- * This handles a TX status interrupt, which occurs only when:
- * - a TX error occurred, or
- * - TX of a packet completed.
- */
-static void smc911x_tx(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int tx_status;
-
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
- __func__);
-
- /* Collect the TX status */
- while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
- DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
- (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
- tx_status = SMC_GET_TX_STS_FIFO(lp);
- dev->stats.tx_packets++;
- dev->stats.tx_bytes+=tx_status>>16;
- DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
- (tx_status & 0xffff0000) >> 16,
- tx_status & 0x0000ffff);
- /* count Tx errors, but ignore lost carrier errors when in
- * full-duplex mode */
- if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
- !(tx_status & 0x00000306))) {
- dev->stats.tx_errors++;
- }
- if (tx_status & TX_STS_MANY_COLL_) {
- dev->stats.collisions+=16;
- dev->stats.tx_aborted_errors++;
- } else {
- dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
- }
- /* carrier error only has meaning for half-duplex communication */
- if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
- !lp->ctl_rfduplx) {
- dev->stats.tx_carrier_errors++;
- }
- if (tx_status & TX_STS_LATE_COLL_) {
- dev->stats.collisions++;
- dev->stats.tx_aborted_errors++;
- }
- }
-}
-
-
-/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
-/*
- * Reads a register from the MII Management serial interface
- */
-
-static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int phydata;
-
- SMC_GET_MII(lp, phyreg, phyaddr, phydata);
-
- DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
- __func__, phyaddr, phyreg, phydata);
- return phydata;
-}
-
-
-/*
- * Writes a register to the MII Management serial interface
- */
-static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
- int phydata)
-{
- struct smc911x_local *lp = netdev_priv(dev);
-
- DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
-
- SMC_SET_MII(lp, phyreg, phyaddr, phydata);
-}
-
-/*
- * Finds and reports the PHY address (the 115 and 117 have an external
- * PHY interface; the 118 has an internal PHY only).
- */
-static void smc911x_phy_detect(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int phyaddr;
- unsigned int cfg, id1, id2;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- lp->phy_type = 0;
-
- /*
-	 * Scan PHY addresses 1 through 31 if necessary; fall back to
-	 * the internal PHY otherwise.
- */
- switch(lp->version) {
- case CHIP_9115:
- case CHIP_9117:
- case CHIP_9215:
- case CHIP_9217:
- cfg = SMC_GET_HW_CFG(lp);
- if (cfg & HW_CFG_EXT_PHY_DET_) {
- cfg &= ~HW_CFG_PHY_CLK_SEL_;
- cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
- SMC_SET_HW_CFG(lp, cfg);
- udelay(10); /* Wait for clocks to stop */
-
- cfg |= HW_CFG_EXT_PHY_EN_;
- SMC_SET_HW_CFG(lp, cfg);
- udelay(10); /* Wait for clocks to stop */
-
- cfg &= ~HW_CFG_PHY_CLK_SEL_;
- cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
- SMC_SET_HW_CFG(lp, cfg);
- udelay(10); /* Wait for clocks to stop */
-
- cfg |= HW_CFG_SMI_SEL_;
- SMC_SET_HW_CFG(lp, cfg);
-
- for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
-
- /* Read the PHY identifiers */
- SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
- SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);
-
- /* Make sure it is a valid identifier */
- if (id1 != 0x0000 && id1 != 0xffff &&
- id1 != 0x8000 && id2 != 0x0000 &&
- id2 != 0xffff && id2 != 0x8000) {
- /* Save the PHY's address */
- lp->mii.phy_id = phyaddr & 31;
- lp->phy_type = id1 << 16 | id2;
- break;
- }
- }
- if (phyaddr < 32)
- /* Found an external PHY */
- break;
- }
- fallthrough;
- default:
- /* Internal media only */
- SMC_GET_PHY_ID1(lp, 1, id1);
- SMC_GET_PHY_ID2(lp, 1, id2);
- /* Save the PHY's address */
- lp->mii.phy_id = 1;
- lp->phy_type = id1 << 16 | id2;
- }
-
- DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%x\n",
- id1, id2, lp->mii.phy_id);
-}
-
-/*
- * Sets the PHY to a configuration as determined by the user.
- * Called with spin_lock held.
- */
-static int smc911x_phy_fixed(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int phyaddr = lp->mii.phy_id;
- int bmcr;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- /* Enter Link Disable state */
- SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
- bmcr |= BMCR_PDOWN;
- SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
-
- /*
- * Set our fixed capabilities
- * Disable auto-negotiation
- */
- bmcr &= ~BMCR_ANENABLE;
- if (lp->ctl_rfduplx)
- bmcr |= BMCR_FULLDPLX;
-
- if (lp->ctl_rspeed == 100)
- bmcr |= BMCR_SPEED100;
-
- /* Write our capabilities to the phy control register */
- SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
-
-	/* Power the PHY back up, applying the new settings */
- bmcr &= ~BMCR_PDOWN;
- SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
-
- return 1;
-}
-
-/**
- * smc911x_phy_reset - reset the phy
- * @dev: net device
- * @phy: phy address
- *
- * Issue a software reset for the specified PHY and
- * wait up to 100ms for the reset to complete. We should
- * not access the PHY for 50ms after issuing the reset.
- *
- * The time to wait appears to be dependent on the PHY.
- *
- */
-static int smc911x_phy_reset(struct net_device *dev, int phy)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int timeout;
- unsigned long flags;
- unsigned int reg;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
-
- spin_lock_irqsave(&lp->lock, flags);
- reg = SMC_GET_PMT_CTRL(lp);
- reg &= ~0xfffff030;
- reg |= PMT_CTRL_PHY_RST_;
- SMC_SET_PMT_CTRL(lp, reg);
- spin_unlock_irqrestore(&lp->lock, flags);
- for (timeout = 2; timeout; timeout--) {
- msleep(50);
- spin_lock_irqsave(&lp->lock, flags);
- reg = SMC_GET_PMT_CTRL(lp);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (!(reg & PMT_CTRL_PHY_RST_)) {
-			/* extra delay required because the phy may
-			 * not have completed its reset when
-			 * PMT_CTRL_PHY_RST_ is cleared. 256us
- * should suffice, but use 500us to be safe
- */
- udelay(500);
- break;
- }
- }
-
- return reg & PMT_CTRL_PHY_RST_;
-}
-
-/**
- * smc911x_phy_powerdown - powerdown phy
- * @dev: net device
- * @phy: phy address
- *
- * Power down the specified PHY
- */
-static void smc911x_phy_powerdown(struct net_device *dev, int phy)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int bmcr;
-
- /* Enter Link Disable state */
- SMC_GET_PHY_BMCR(lp, phy, bmcr);
- bmcr |= BMCR_PDOWN;
- SMC_SET_PHY_BMCR(lp, phy, bmcr);
-}
-
-/**
- * smc911x_phy_check_media - check the media status and adjust BMCR
- * @dev: net device
- * @init: set true for initialisation
- *
- * Select duplex mode depending on negotiation state. This
- * also updates our carrier state.
- */
-static void smc911x_phy_check_media(struct net_device *dev, int init)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int phyaddr = lp->mii.phy_id;
- unsigned int bmcr, cr;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
- /* duplex state has changed */
- SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
- SMC_GET_MAC_CR(lp, cr);
- if (lp->mii.full_duplex) {
- DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
- bmcr |= BMCR_FULLDPLX;
- cr |= MAC_CR_RCVOWN_;
- } else {
- DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
- bmcr &= ~BMCR_FULLDPLX;
- cr &= ~MAC_CR_RCVOWN_;
- }
- SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
- SMC_SET_MAC_CR(lp, cr);
- }
-}
-
-/*
- * Configures the specified PHY through the MII management interface
- * using Autonegotiation.
- * Calls smc911x_phy_fixed() if the user has requested a certain config.
- * If RPC ANEG bit is set, the media selection is dependent purely on
- * the selection by the MII (either in the MII BMCR reg or the result
- * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
- * is controlled by the RPC SPEED and RPC DPLX bits.
- */
-static void smc911x_phy_configure(struct work_struct *work)
-{
- struct smc911x_local *lp = container_of(work, struct smc911x_local,
- phy_configure);
- struct net_device *dev = lp->netdev;
- int phyaddr = lp->mii.phy_id;
- int my_phy_caps; /* My PHY capabilities */
- int my_ad_caps; /* My Advertised capabilities */
- int status __always_unused;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
-
- /*
- * We should not be called if phy_type is zero.
- */
- if (lp->phy_type == 0)
- return;
-
- if (smc911x_phy_reset(dev, phyaddr)) {
- netdev_info(dev, "PHY reset timed out\n");
- return;
- }
- spin_lock_irqsave(&lp->lock, flags);
-
- /*
- * Enable PHY Interrupts (for register 18)
- * Interrupts listed here are enabled
- */
- SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
- PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
- PHY_INT_MASK_LINK_DOWN_);
-
-	/* If the user requested no auto-negotiation, apply that fixed setup */
- if (lp->mii.force_media) {
- smc911x_phy_fixed(dev);
- goto smc911x_phy_configure_exit;
- }
-
- /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
- SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
- if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
- netdev_info(dev, "Auto negotiation NOT supported\n");
- smc911x_phy_fixed(dev);
- goto smc911x_phy_configure_exit;
- }
-
- /* CSMA capable w/ both pauses */
- my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-
- if (my_phy_caps & BMSR_100BASE4)
- my_ad_caps |= ADVERTISE_100BASE4;
- if (my_phy_caps & BMSR_100FULL)
- my_ad_caps |= ADVERTISE_100FULL;
- if (my_phy_caps & BMSR_100HALF)
- my_ad_caps |= ADVERTISE_100HALF;
- if (my_phy_caps & BMSR_10FULL)
- my_ad_caps |= ADVERTISE_10FULL;
- if (my_phy_caps & BMSR_10HALF)
- my_ad_caps |= ADVERTISE_10HALF;
-
- /* Disable capabilities not selected by our user */
- if (lp->ctl_rspeed != 100)
- my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
-
- if (!lp->ctl_rfduplx)
- my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
-
- /* Update our Auto-Neg Advertisement Register */
- SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
- lp->mii.advertising = my_ad_caps;
-
- /*
- * Read the register back. Without this, it appears that when
- * auto-negotiation is restarted, sometimes it isn't ready and
- * the link does not come up.
- */
- udelay(10);
- SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
-
- DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
- DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);
-
- /* Restart auto-negotiation process in order to advertise my caps */
- SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
-
- smc911x_phy_check_media(dev, 1);
-
-smc911x_phy_configure_exit:
- spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-/*
- * smc911x_phy_interrupt
- *
- * Purpose: Handle interrupts relating to PHY register 18. This is
- * called from the "hard" interrupt handler under our private spinlock.
- */
-static void smc911x_phy_interrupt(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int phyaddr = lp->mii.phy_id;
- int status __always_unused;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- if (lp->phy_type == 0)
- return;
-
- smc911x_phy_check_media(dev, 0);
- /* read to clear status bits */
- SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
- DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
- status & 0xffff);
- DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
- SMC_GET_AFC_CFG(lp));
-}
-
-/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
-
-/*
- * This is the main routine of the driver, to handle the device when
- * it needs some attention.
- */
-static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int status, mask, timeout;
- unsigned int rx_overrun=0, cr, pkts;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- spin_lock_irqsave(&lp->lock, flags);
-
- /* Spurious interrupt check */
- if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
- (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
- spin_unlock_irqrestore(&lp->lock, flags);
- return IRQ_NONE;
- }
-
- mask = SMC_GET_INT_EN(lp);
- SMC_SET_INT_EN(lp, 0);
-
- /* set a timeout value, so I don't stay here forever */
- timeout = 8;
-
-
- do {
- status = SMC_GET_INT(lp);
-
- DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
- status, mask, status & ~mask);
-
- status &= mask;
- if (!status)
- break;
-
- /* Handle SW interrupt condition */
- if (status & INT_STS_SW_INT_) {
- SMC_ACK_INT(lp, INT_STS_SW_INT_);
- mask &= ~INT_EN_SW_INT_EN_;
- }
- /* Handle various error conditions */
- if (status & INT_STS_RXE_) {
- SMC_ACK_INT(lp, INT_STS_RXE_);
- dev->stats.rx_errors++;
- }
- if (status & INT_STS_RXDFH_INT_) {
- SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
- dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
- }
-		/* Undocumented interrupt - what is the right thing to do here? */
- if (status & INT_STS_RXDF_INT_) {
- SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
- }
-
- /* Rx Data FIFO exceeds set level */
- if (status & INT_STS_RDFL_) {
- if (IS_REV_A(lp->revision)) {
- rx_overrun=1;
- SMC_GET_MAC_CR(lp, cr);
- cr &= ~MAC_CR_RXEN_;
- SMC_SET_MAC_CR(lp, cr);
- DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
- dev->stats.rx_errors++;
- dev->stats.rx_fifo_errors++;
- }
- SMC_ACK_INT(lp, INT_STS_RDFL_);
- }
- if (status & INT_STS_RDFO_) {
- if (!IS_REV_A(lp->revision)) {
- SMC_GET_MAC_CR(lp, cr);
- cr &= ~MAC_CR_RXEN_;
- SMC_SET_MAC_CR(lp, cr);
- rx_overrun=1;
- DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
- dev->stats.rx_errors++;
- dev->stats.rx_fifo_errors++;
- }
- SMC_ACK_INT(lp, INT_STS_RDFO_);
- }
- /* Handle receive condition */
- if ((status & INT_STS_RSFL_) || rx_overrun) {
- unsigned int fifo;
- DBG(SMC_DEBUG_RX, dev, "RX irq\n");
- fifo = SMC_GET_RX_FIFO_INF(lp);
- pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
- DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
- pkts, fifo & 0xFFFF);
- if (pkts != 0) {
-#ifdef SMC_USE_DMA
- unsigned int fifo;
- if (lp->rxdma_active){
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
- "RX DMA active\n");
- /* The DMA is already running so up the IRQ threshold */
- fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
- fifo |= pkts & 0xFF;
- DBG(SMC_DEBUG_RX, dev,
- "Setting RX stat FIFO threshold to %d\n",
- fifo & 0xff);
- SMC_SET_FIFO_INT(lp, fifo);
- } else
-#endif
- smc911x_rcv(dev);
- }
- SMC_ACK_INT(lp, INT_STS_RSFL_);
- }
- /* Handle transmit FIFO available */
- if (status & INT_STS_TDFA_) {
- DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
- SMC_SET_FIFO_TDA(lp, 0xFF);
- lp->tx_throttle = 0;
-#ifdef SMC_USE_DMA
- if (!lp->txdma_active)
-#endif
- netif_wake_queue(dev);
- SMC_ACK_INT(lp, INT_STS_TDFA_);
- }
- /* Handle transmit done condition */
-#if 1
- if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
- "Tx stat FIFO limit (%d) /GPT irq\n",
- (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
- smc911x_tx(dev);
- SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
- SMC_ACK_INT(lp, INT_STS_TSFL_);
- SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
- }
-#else
- if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
- smc911x_tx(dev);
- SMC_ACK_INT(lp, INT_STS_TSFL_);
- }
-
- if (status & INT_STS_GPT_INT_) {
- DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
- SMC_GET_IRQ_CFG(lp),
- SMC_GET_FIFO_INT(lp),
- SMC_GET_RX_CFG(lp));
- DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
- (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
- SMC_GET_RX_FIFO_INF(lp) & 0xffff,
- SMC_GET_RX_STS_FIFO_PEEK(lp));
- SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
- SMC_ACK_INT(lp, INT_STS_GPT_INT_);
- }
-#endif
-
- /* Handle PHY interrupt condition */
- if (status & INT_STS_PHY_INT_) {
- DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
- smc911x_phy_interrupt(dev);
- SMC_ACK_INT(lp, INT_STS_PHY_INT_);
- }
- } while (--timeout);
-
- /* restore mask state */
- SMC_SET_INT_EN(lp, mask);
-
- DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
- 8-timeout);
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-#ifdef SMC_USE_DMA
-static void
-smc911x_tx_dma_irq(void *data)
-{
- struct smc911x_local *lp = data;
- struct net_device *dev = lp->netdev;
- struct sk_buff *skb = lp->current_tx_skb;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
- BUG_ON(skb == NULL);
- dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
- netif_trans_update(dev);
- dev_kfree_skb_irq(skb);
- lp->current_tx_skb = NULL;
- if (lp->pending_tx_skb != NULL)
- smc911x_hardware_send_pkt(dev);
- else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
- "No pending Tx packets. DMA disabled\n");
- spin_lock_irqsave(&lp->lock, flags);
- lp->txdma_active = 0;
- if (!lp->tx_throttle) {
- netif_wake_queue(dev);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
- }
-
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
- "TX DMA irq completed\n");
-}
-static void
-smc911x_rx_dma_irq(void *data)
-{
- struct smc911x_local *lp = data;
- struct net_device *dev = lp->netdev;
- struct sk_buff *skb = lp->current_rx_skb;
- unsigned long flags;
- unsigned int pkts;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
- dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
- BUG_ON(skb == NULL);
- lp->current_rx_skb = NULL;
- PRINT_PKT(skb->data, skb->len);
- skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
-
- spin_lock_irqsave(&lp->lock, flags);
- pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
- if (pkts != 0) {
- smc911x_rcv(dev);
- }else {
- lp->rxdma_active = 0;
- }
- spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
- "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
- pkts);
-}
-#endif /* SMC_USE_DMA */
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling receive - used by netconsole and other diagnostic tools
- * to allow network i/o with interrupts disabled.
- */
-static void smc911x_poll_controller(struct net_device *dev)
-{
- disable_irq(dev->irq);
- smc911x_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
-}
-#endif
-
-/* Our watchdog timed out. Called by the networking layer */
-static void smc911x_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int status, mask;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- spin_lock_irqsave(&lp->lock, flags);
- status = SMC_GET_INT(lp);
- mask = SMC_GET_INT_EN(lp);
- spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
- status, mask);
-
- /* Dump the current TX FIFO contents and restart */
- mask = SMC_GET_TX_CFG(lp);
- SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
- /*
- * Reconfiguring the PHY doesn't seem like a bad idea here, but
- * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
- * which calls schedule(). Hence we use a work queue.
- */
- if (lp->phy_type != 0)
- schedule_work(&lp->phy_configure);
-
- /* We can accept TX packets again */
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-/*
- * This routine will, depending on the interface flags and the
- * multicast list, either accept all multicast packets, go into
- * promiscuous mode (for TCPDUMP and cousins), or accept
- * a select set of multicast packets.
- */
-static void smc911x_set_multicast_list(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int multicast_table[2];
- unsigned int mcr, update_multicast = 0;
- unsigned long flags;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- spin_lock_irqsave(&lp->lock, flags);
- SMC_GET_MAC_CR(lp, mcr);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- if (dev->flags & IFF_PROMISC) {
-
- DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
- mcr |= MAC_CR_PRMS_;
- }
- /*
- * Here, I am setting this to accept all multicast packets.
- * I don't need to zero the multicast table, because the flag is
-	 * checked before the table is used.
- */
- else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
- DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
- mcr |= MAC_CR_MCPAS_;
- }
-
- /*
- * This sets the internal hardware table to filter out unwanted
- * multicast packets before they take up memory.
- *
-	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
-	 * the address form the offset into the table. If the bit at that
-	 * offset is 1, the multicast packet is accepted. Otherwise, it's
-	 * dropped silently.
-	 *
-	 * Of those 6 bits, the high bit selects which of the two 32-bit
-	 * registers to use, while the low 5 bits select the bit within
-	 * that register.
- */
- else if (!netdev_mc_empty(dev)) {
- struct netdev_hw_addr *ha;
-
-		/* Set the Hash/Perfect filtering mode */
- mcr |= MAC_CR_HPFILT_;
-
- /* start with a table of all zeros: reject all */
- memset(multicast_table, 0, sizeof(multicast_table));
-
- netdev_for_each_mc_addr(ha, dev) {
- u32 position;
-
- /* upper 6 bits are used as hash index */
- position = ether_crc(ETH_ALEN, ha->addr)>>26;
-
- multicast_table[position>>5] |= 1 << (position&0x1f);
- }
-
- /* be sure I get rid of flags I might have set */
- mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
-
- /* now, the table can be loaded into the chipset */
- update_multicast = 1;
- } else {
- DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
- mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
-
- /*
- * since I'm disabling all multicast entirely, I need to
- * clear the multicast list
- */
- memset(multicast_table, 0, sizeof(multicast_table));
- update_multicast = 1;
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- SMC_SET_MAC_CR(lp, mcr);
- if (update_multicast) {
- DBG(SMC_DEBUG_MISC, dev,
- "update mcast hash table 0x%08x 0x%08x\n",
- multicast_table[0], multicast_table[1]);
- SMC_SET_HASHL(lp, multicast_table[0]);
- SMC_SET_HASHH(lp, multicast_table[1]);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
-}
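As a small illustration of the HASHL/HASHH mapping described above, the sketch below takes one hypothetical CRC value (the driver itself derives it with ether_crc()) and shows which table word and bit it would set; the numbers are illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t crc = 0xd4c829a3;	/* hypothetical ether_crc() result */
		uint32_t table[2] = { 0, 0 };	/* HASHL = table[0], HASHH = table[1] */

		unsigned int position = crc >> 26;	/* top 6 bits: 0..63 */
		table[position >> 5] |= 1u << (position & 0x1f);

		printf("position=%u -> word %u, bit %u\n",
		       position, position >> 5, position & 0x1f);
		printf("HASHL=0x%08x HASHH=0x%08x\n",
		       (unsigned)table[0], (unsigned)table[1]);
		return 0;
	}

With this example value the top 6 bits are 53, so bit 21 of the high register (HASHH) gets set; the driver then loads the two words into the chip exactly as done above.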
-
-
-/*
- * Open and Initialize the board
- *
- * Set up everything, reset the card, etc..
- */
-static int
-smc911x_open(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- /* reset the hardware */
- smc911x_reset(dev);
-
- /* Configure the PHY, initialize the link state */
- smc911x_phy_configure(&lp->phy_configure);
-
- /* Turn on Tx + Rx */
- smc911x_enable(dev);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-/*
- * smc911x_close
- *
- * This makes the board clean up everything that it can
- * and stop talking to the outside world. Invoked by
- * an 'ifconfig ethX down'
- */
-static int smc911x_close(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- netif_stop_queue(dev);
- netif_carrier_off(dev);
-
- /* clear everything */
- smc911x_shutdown(dev);
-
- if (lp->phy_type != 0) {
- /* We need to ensure that no calls to
- * smc911x_phy_configure are pending.
- */
- cancel_work_sync(&lp->phy_configure);
- smc911x_phy_powerdown(dev, lp->mii.phy_id);
- }
-
- if (lp->pending_tx_skb) {
- dev_kfree_skb(lp->pending_tx_skb);
- lp->pending_tx_skb = NULL;
- }
-
- return 0;
-}
-
-/*
- * Ethtool support
- */
-static int
-smc911x_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int status;
- unsigned long flags;
- u32 supported;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- if (lp->phy_type != 0) {
- spin_lock_irqsave(&lp->lock, flags);
- mii_ethtool_get_link_ksettings(&lp->mii, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
- } else {
- supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_TP | SUPPORTED_AUI;
-
- if (lp->ctl_rspeed == 10)
- cmd->base.speed = SPEED_10;
- else if (lp->ctl_rspeed == 100)
- cmd->base.speed = SPEED_100;
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- cmd->base.port = 0;
- SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
- cmd->base.duplex =
- (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
- DUPLEX_FULL : DUPLEX_HALF;
-
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.supported, supported);
-
- }
-
- return 0;
-}
-
-static int
-smc911x_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int ret;
- unsigned long flags;
-
- if (lp->phy_type != 0) {
- spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
- } else {
- if (cmd->base.autoneg != AUTONEG_DISABLE ||
- cmd->base.speed != SPEED_10 ||
- (cmd->base.duplex != DUPLEX_HALF &&
- cmd->base.duplex != DUPLEX_FULL) ||
- (cmd->base.port != PORT_TP &&
- cmd->base.port != PORT_AUI))
- return -EINVAL;
-
- lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
-
- ret = 0;
- }
-
- return ret;
-}
-
-static void
-smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strscpy(info->driver, CARDNAME, sizeof(info->driver));
- strscpy(info->version, version, sizeof(info->version));
- strscpy(info->bus_info, dev_name(dev->dev.parent),
- sizeof(info->bus_info));
-}
-
-static int smc911x_ethtool_nwayreset(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int ret = -EINVAL;
- unsigned long flags;
-
- if (lp->phy_type != 0) {
- spin_lock_irqsave(&lp->lock, flags);
- ret = mii_nway_restart(&lp->mii);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
-
- return ret;
-}
-
-static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- return lp->msg_enable;
-}
-
-static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- lp->msg_enable = level;
-}
-
-static int smc911x_ethtool_getregslen(struct net_device *dev)
-{
- /* System regs + MAC regs + PHY regs */
- return (((E2P_CMD - ID_REV)/4 + 1) +
- (WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
-}
-
-static void smc911x_ethtool_getregs(struct net_device *dev,
- struct ethtool_regs *regs, void *buf)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned long flags;
- u32 reg,i,j=0;
- u32 *data = (u32*)buf;
-
- regs->version = lp->version;
- for(i=ID_REV;i<=E2P_CMD;i+=4) {
- data[j++] = SMC_inl(lp, i);
- }
- for(i=MAC_CR;i<=WUCSR;i++) {
- spin_lock_irqsave(&lp->lock, flags);
- SMC_GET_MAC_CSR(lp, i, reg);
- spin_unlock_irqrestore(&lp->lock, flags);
- data[j++] = reg;
- }
- for(i=0;i<=31;i++) {
- spin_lock_irqsave(&lp->lock, flags);
- SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
- spin_unlock_irqrestore(&lp->lock, flags);
- data[j++] = reg & 0xFFFF;
- }
-}
-
-static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- unsigned int timeout;
- int e2p_cmd;
-
- e2p_cmd = SMC_GET_E2P_CMD(lp);
- for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
- if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
- PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
- __func__);
- return -EFAULT;
- }
- mdelay(1);
- e2p_cmd = SMC_GET_E2P_CMD(lp);
- }
- if (timeout == 0) {
- PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
- __func__);
- return -ETIMEDOUT;
- }
- return 0;
-}
-
-static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
- int cmd, int addr)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int ret;
-
- if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
- return ret;
- SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
- ((cmd) & (0x7<<28)) |
- ((addr) & 0xFF));
- return 0;
-}
-
-static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
- u8 *data)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int ret;
-
- if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
- return ret;
- *data = SMC_GET_E2P_DATA(lp);
- return 0;
-}
-
-static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
- u8 data)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int ret;
-
- if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
- return ret;
- SMC_SET_E2P_DATA(lp, data);
- return 0;
-}
-
-static int smc911x_ethtool_geteeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- u8 eebuf[SMC911X_EEPROM_LEN];
- int i, ret;
-
- for(i=0;i<SMC911X_EEPROM_LEN;i++) {
- if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
- return ret;
- if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
- return ret;
- }
- memcpy(data, eebuf+eeprom->offset, eeprom->len);
- return 0;
-}
-
-static int smc911x_ethtool_seteeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- int i, ret;
-
- /* Enable erase */
- if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
- return ret;
- for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
- /* erase byte */
- if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
- return ret;
- /* write byte */
-		if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data++))!=0)
- return ret;
- if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
- return ret;
- }
- return 0;
-}
-
-static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
-{
- return SMC911X_EEPROM_LEN;
-}
-
-static const struct ethtool_ops smc911x_ethtool_ops = {
- .get_drvinfo = smc911x_ethtool_getdrvinfo,
- .get_msglevel = smc911x_ethtool_getmsglevel,
- .set_msglevel = smc911x_ethtool_setmsglevel,
- .nway_reset = smc911x_ethtool_nwayreset,
- .get_link = ethtool_op_get_link,
- .get_regs_len = smc911x_ethtool_getregslen,
- .get_regs = smc911x_ethtool_getregs,
- .get_eeprom_len = smc911x_ethtool_geteeprom_len,
- .get_eeprom = smc911x_ethtool_geteeprom,
- .set_eeprom = smc911x_ethtool_seteeprom,
- .get_link_ksettings = smc911x_ethtool_get_link_ksettings,
- .set_link_ksettings = smc911x_ethtool_set_link_ksettings,
-};
-
-/*
- * smc911x_findirq
- *
- * This routine has a simple purpose -- make the SMC chip generate an
- * interrupt, so an auto-detect routine can detect it, and find the IRQ,
- */
-static int smc911x_findirq(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int timeout = 20;
- unsigned long cookie;
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- cookie = probe_irq_on();
-
- /*
- * Force a SW interrupt
- */
-
- SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);
-
- /*
- * Wait until positive that the interrupt has been generated
- */
- do {
- int int_status;
- udelay(10);
- int_status = SMC_GET_INT_EN(lp);
- if (int_status & INT_EN_SW_INT_EN_)
- break; /* got the interrupt */
- } while (--timeout);
-
- /*
-	 * there is really nothing that I can do here if the timeout expires,
-	 * as probe_irq_off() will return 0 anyway, which is what I
-	 * want in this case. Plus, the cleanup is needed in both
-	 * cases.
- */
-
- /* and disable all interrupts again */
- SMC_SET_INT_EN(lp, 0);
-
- /* and return what I found */
- return probe_irq_off(cookie);
-}
-
-static const struct net_device_ops smc911x_netdev_ops = {
- .ndo_open = smc911x_open,
- .ndo_stop = smc911x_close,
- .ndo_start_xmit = smc911x_hard_start_xmit,
- .ndo_tx_timeout = smc911x_timeout,
- .ndo_set_rx_mode = smc911x_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = smc911x_poll_controller,
-#endif
-};
-
-/*
- * Function: smc911x_probe(struct net_device *dev)
- *
- * Purpose:
- * Tests to see if a given ioaddr points to an SMC911x chip.
- * Returns a 0 on success
- *
- * Algorithm:
- * (1) see if the endian word is OK
- *	(2) see if I recognize the chip ID in the appropriate register
- *
- * Here I do typical initialization tasks.
- *
- * o Initialize the structure if needed
- * o print out my vanity message if not done so already
- * o print out what type of hardware is detected
- * o print out the ethernet address
- * o find the IRQ
- * o set up my private data
- * o configure the dev structure with my subroutines
- * o actually GRAB the irq.
- * o GRAB the region
- */
-static int smc911x_probe(struct net_device *dev)
-{
- struct smc911x_local *lp = netdev_priv(dev);
- int i, retval;
- unsigned int val, chip_id, revision;
- const char *version_string;
- unsigned long irq_flags;
-#ifdef SMC_USE_DMA
- struct dma_slave_config config;
- dma_cap_mask_t mask;
-#endif
- u8 addr[ETH_ALEN];
-
- DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-
- /* First, see if the endian word is recognized */
- val = SMC_GET_BYTE_TEST(lp);
- DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
- CARDNAME, val);
- if (val != 0x87654321) {
- netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
- retval = -ENODEV;
- goto err_out;
- }
-
- /*
-	 * check if the chip ID is something that I
-	 * recognize. The chip_ids[] table might need additions later,
-	 * as future revisions could be added.
- */
- chip_id = SMC_GET_PN(lp);
- DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
- CARDNAME, chip_id);
- for(i=0;chip_ids[i].id != 0; i++) {
- if (chip_ids[i].id == chip_id) break;
- }
- if (!chip_ids[i].id) {
- netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
- retval = -ENODEV;
- goto err_out;
- }
- version_string = chip_ids[i].name;
-
- revision = SMC_GET_REV(lp);
- DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);
-
- /* At this point I'll assume that the chip is an SMC911x. */
- DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
- CARDNAME, chip_ids[i].name);
-
- /* Validate the TX FIFO size requested */
- if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
- netdev_err(dev, "Invalid TX FIFO size requested %d\n",
- tx_fifo_kb);
- retval = -EINVAL;
- goto err_out;
- }
-
- /* fill in some of the fields */
- lp->version = chip_ids[i].id;
- lp->revision = revision;
- lp->tx_fifo_kb = tx_fifo_kb;
- /* Reverse calculate the RX FIFO size from the TX */
- lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512;
- lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
-
- /* Set the automatic flow control values */
- switch(lp->tx_fifo_kb) {
- /*
- * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
- * AFC_LO is AFC_HI/2
- * BACK_DUR is about 5uS*(AFC_LO) rounded down
- */
- case 2:/* 13440 Rx Data Fifo Size */
- lp->afc_cfg=0x008C46AF;break;
- case 3:/* 12480 Rx Data Fifo Size */
- lp->afc_cfg=0x0082419F;break;
- case 4:/* 11520 Rx Data Fifo Size */
- lp->afc_cfg=0x00783C9F;break;
- case 5:/* 10560 Rx Data Fifo Size */
- lp->afc_cfg=0x006E374F;break;
- case 6:/* 9600 Rx Data Fifo Size */
- lp->afc_cfg=0x0064328F;break;
- case 7:/* 8640 Rx Data Fifo Size */
- lp->afc_cfg=0x005A2D7F;break;
- case 8:/* 7680 Rx Data Fifo Size */
- lp->afc_cfg=0x0050287F;break;
- case 9:/* 6720 Rx Data Fifo Size */
- lp->afc_cfg=0x0046236F;break;
- case 10:/* 5760 Rx Data Fifo Size */
- lp->afc_cfg=0x003C1E6F;break;
- case 11:/* 4800 Rx Data Fifo Size */
- lp->afc_cfg=0x0032195F;break;
- /*
- * AFC_HI is ~1520 bytes less than RX Data Fifo Size
- * AFC_LO is AFC_HI/2
- * BACK_DUR is about 5uS*(AFC_LO) rounded down
- */
- case 12:/* 3840 Rx Data Fifo Size */
- lp->afc_cfg=0x0024124F;break;
- case 13:/* 2880 Rx Data Fifo Size */
- lp->afc_cfg=0x0015073F;break;
- case 14:/* 1920 Rx Data Fifo Size */
- lp->afc_cfg=0x0006032F;break;
- default:
-		PRINTK(dev, "ERROR -- no AFC_CFG setting found\n");
- break;
- }
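As a cross-check of the comment above, here is a minimal sketch that recomputes the FIFO split and the AFC_HI/AFC_LO fields for the smallest setting, tx_fifo_kb = 2; the rounding convention is inferred from the table, so treat it as an approximation rather than datasheet truth:

	#include <stdio.h>

	int main(void)
	{
		int tx_fifo_kb = 2;
		int tx_fifo_size = (tx_fifo_kb << 10) - 512;		      /* 1536 */
		int rx_fifo_size = ((0x4000 - 512 - tx_fifo_size) / 16) * 15; /* 13440 */
		int afc_hi = (rx_fifo_size * 2 / 3) / 64;		      /* 140 = 0x8C */
		int afc_lo = afc_hi / 2;				      /* 70 = 0x46 */

		printf("tx=%d rx=%d AFC_HI=0x%02X AFC_LO=0x%02X\n",
		       tx_fifo_size, rx_fifo_size, afc_hi, afc_lo);
		return 0;
	}

The printed 0x8C and 0x46 match the upper bytes of the 0x008C46AF entry for case 2 above, and the 13440-byte RX FIFO matches that case's comment.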
-
- DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
- "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
- lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
-
- spin_lock_init(&lp->lock);
-
- /* Get the MAC address */
- SMC_GET_MAC_ADDR(lp, addr);
- eth_hw_addr_set(dev, addr);
-
- /* now, reset the chip, and put it into a known state */
- smc911x_reset(dev);
-
- /*
- * If dev->irq is 0, then the device has to be banged on to see
- * what the IRQ is.
- *
-	 * Specifying an IRQ is done with the assumption that the user knows
-	 * what they are doing. No checking is done.
- */
- if (dev->irq < 1) {
- int trials;
-
- trials = 3;
- while (trials--) {
- dev->irq = smc911x_findirq(dev);
- if (dev->irq)
- break;
- /* kick the card and try again */
- smc911x_reset(dev);
- }
- }
- if (dev->irq == 0) {
- netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
- retval = -ENODEV;
- goto err_out;
- }
- dev->irq = irq_canonicalize(dev->irq);
-
- dev->netdev_ops = &smc911x_netdev_ops;
- dev->watchdog_timeo = msecs_to_jiffies(watchdog);
- dev->ethtool_ops = &smc911x_ethtool_ops;
-
- INIT_WORK(&lp->phy_configure, smc911x_phy_configure);
- lp->mii.phy_id_mask = 0x1f;
- lp->mii.reg_num_mask = 0x1f;
- lp->mii.force_media = 0;
- lp->mii.full_duplex = 0;
- lp->mii.dev = dev;
- lp->mii.mdio_read = smc911x_phy_read;
- lp->mii.mdio_write = smc911x_phy_write;
-
- /*
- * Locate the phy, if any.
- */
- smc911x_phy_detect(dev);
-
- /* Set default parameters */
- lp->msg_enable = NETIF_MSG_LINK;
- lp->ctl_rfduplx = 1;
- lp->ctl_rspeed = 100;
-
-#ifdef SMC_DYNAMIC_BUS_CONFIG
- irq_flags = lp->cfg.irq_flags;
-#else
- irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
-#endif
-
- /* Grab the IRQ */
- retval = request_irq(dev->irq, smc911x_interrupt,
- irq_flags, dev->name, dev);
- if (retval)
- goto err_out;
-
-#ifdef SMC_USE_DMA
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- lp->rxdma = dma_request_channel(mask, NULL, NULL);
- lp->txdma = dma_request_channel(mask, NULL, NULL);
- lp->rxdma_active = 0;
- lp->txdma_active = 0;
-
- memset(&config, 0, sizeof(config));
- config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- config.src_addr = lp->physaddr + RX_DATA_FIFO;
- config.dst_addr = lp->physaddr + TX_DATA_FIFO;
- config.src_maxburst = 32;
- config.dst_maxburst = 32;
- retval = dmaengine_slave_config(lp->rxdma, &config);
- if (retval) {
- dev_err(lp->dev, "dma rx channel configuration failed: %d\n",
- retval);
- goto err_out;
- }
- retval = dmaengine_slave_config(lp->txdma, &config);
- if (retval) {
- dev_err(lp->dev, "dma tx channel configuration failed: %d\n",
- retval);
- goto err_out;
- }
-#endif
-
- retval = register_netdev(dev);
- if (retval == 0) {
-		/* now, print out the card info, in a short format. */
- netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
- version_string, lp->revision,
- dev->base_addr, dev->irq);
-
-#ifdef SMC_USE_DMA
- if (lp->rxdma)
- pr_cont(" RXDMA %p", lp->rxdma);
-
- if (lp->txdma)
- pr_cont(" TXDMA %p", lp->txdma);
-#endif
- pr_cont("\n");
- if (!is_valid_ether_addr(dev->dev_addr)) {
- netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
- } else {
- /* Print the Ethernet address */
- netdev_info(dev, "Ethernet addr: %pM\n",
- dev->dev_addr);
- }
-
- if (lp->phy_type == 0) {
- PRINTK(dev, "No PHY found\n");
- } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
- PRINTK(dev, "LAN911x Internal PHY\n");
- } else {
- PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
- }
- }
-
-err_out:
-#ifdef SMC_USE_DMA
- if (retval) {
- if (lp->rxdma)
- dma_release_channel(lp->rxdma);
- if (lp->txdma)
- dma_release_channel(lp->txdma);
- }
-#endif
- return retval;
-}
-
-/*
- * smc911x_drv_probe(struct platform_device *pdev)
- *
- * Output:
- * 0 --> there is a device
- * anything else, error
- */
-static int smc911x_drv_probe(struct platform_device *pdev)
-{
- struct net_device *ndev;
- struct resource *res;
- struct smc911x_local *lp;
- void __iomem *addr;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * Request the regions.
- */
- if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
- ret = -EBUSY;
- goto out;
- }
-
- ndev = alloc_etherdev(sizeof(struct smc911x_local));
- if (!ndev) {
- ret = -ENOMEM;
- goto release_1;
- }
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- ndev->dma = (unsigned char)-1;
- ndev->irq = platform_get_irq(pdev, 0);
- if (ndev->irq < 0) {
- ret = ndev->irq;
- goto release_both;
- }
-
- lp = netdev_priv(ndev);
- lp->netdev = ndev;
-#ifdef SMC_DYNAMIC_BUS_CONFIG
- {
- struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
- if (!pd) {
- ret = -EINVAL;
- goto release_both;
- }
- memcpy(&lp->cfg, pd, sizeof(lp->cfg));
- }
-#endif
-
- addr = ioremap(res->start, SMC911X_IO_EXTENT);
- if (!addr) {
- ret = -ENOMEM;
- goto release_both;
- }
-
- platform_set_drvdata(pdev, ndev);
- lp->base = addr;
- ndev->base_addr = res->start;
- ret = smc911x_probe(ndev);
- if (ret != 0) {
- iounmap(addr);
-release_both:
- free_netdev(ndev);
-release_1:
- release_mem_region(res->start, SMC911X_IO_EXTENT);
-out:
- pr_info("%s: not found (%d).\n", CARDNAME, ret);
- }
-#ifdef SMC_USE_DMA
- else {
- lp->physaddr = res->start;
- lp->dev = &pdev->dev;
- }
-#endif
-
- return ret;
-}
-
-static int smc911x_drv_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct smc911x_local *lp = netdev_priv(ndev);
- struct resource *res;
-
- DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
-
- unregister_netdev(ndev);
-
- free_irq(ndev->irq, ndev);
-
-#ifdef SMC_USE_DMA
- {
- if (lp->rxdma)
- dma_release_channel(lp->rxdma);
- if (lp->txdma)
- dma_release_channel(lp->txdma);
- }
-#endif
- iounmap(lp->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, SMC911X_IO_EXTENT);
-
- free_netdev(ndev);
- return 0;
-}
-
-static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct net_device *ndev = platform_get_drvdata(dev);
- struct smc911x_local *lp = netdev_priv(ndev);
-
- DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
- if (ndev) {
- if (netif_running(ndev)) {
- netif_device_detach(ndev);
- smc911x_shutdown(ndev);
-#if POWER_DOWN
- /* Set D2 - Energy detect only setting */
- SMC_SET_PMT_CTRL(lp, 2<<12);
-#endif
- }
- }
- return 0;
-}
-
-static int smc911x_drv_resume(struct platform_device *dev)
-{
- struct net_device *ndev = platform_get_drvdata(dev);
-
- DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
- if (ndev) {
- struct smc911x_local *lp = netdev_priv(ndev);
-
- if (netif_running(ndev)) {
- smc911x_reset(ndev);
- if (lp->phy_type != 0)
- smc911x_phy_configure(&lp->phy_configure);
- smc911x_enable(ndev);
- netif_device_attach(ndev);
- }
- }
- return 0;
-}
-
-static struct platform_driver smc911x_driver = {
- .probe = smc911x_drv_probe,
- .remove = smc911x_drv_remove,
- .suspend = smc911x_drv_suspend,
- .resume = smc911x_drv_resume,
- .driver = {
- .name = CARDNAME,
- },
-};
-
-module_platform_driver(smc911x_driver);
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
deleted file mode 100644
index d4edcc0da87c..000000000000
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ /dev/null
@@ -1,901 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*------------------------------------------------------------------------
- . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
- .
- . Copyright (C) 2005 Sensoria Corp.
- . Derived from the unified SMC91x driver by Nicolas Pitre
- .
- .
- . Information contained in this file was obtained from the LAN9118
- . manual from SMC. To get a copy, if you really want one, you can find
- . information under www.smsc.com.
- .
- . Authors
- . Dustin McIntire <dustin@sensoria.com>
- .
- ---------------------------------------------------------------------------*/
-#ifndef _SMC911X_H_
-#define _SMC911X_H_
-
-#include <linux/smc911x.h>
-/*
- * Use the DMA feature on PXA chips
- */
-#ifdef CONFIG_ARCH_PXA
- #define SMC_USE_PXA_DMA 1
- #define SMC_USE_16BIT 0
- #define SMC_USE_32BIT 1
- #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING
-#elif defined(CONFIG_SH_MAGIC_PANEL_R2)
- #define SMC_USE_16BIT 0
- #define SMC_USE_32BIT 1
- #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
-#elif defined(CONFIG_ARCH_OMAP3)
- #define SMC_USE_16BIT 0
- #define SMC_USE_32BIT 1
- #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
- #define SMC_MEM_RESERVED 1
-#elif defined(CONFIG_ARCH_OMAP2)
- #define SMC_USE_16BIT 0
- #define SMC_USE_32BIT 1
- #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
- #define SMC_MEM_RESERVED 1
-#else
-/*
- * Default configuration
- */
-
-#define SMC_DYNAMIC_BUS_CONFIG
-#endif
-
-#ifdef SMC_USE_PXA_DMA
-#define SMC_USE_DMA
-#endif
-
-/* store this information for the driver.. */
-struct smc911x_local {
- /*
- * If I have to wait until the DMA is finished and ready to reload a
- * packet, I will store the skbuff here. Then, the DMA will send it
- * out and free it.
- */
- struct sk_buff *pending_tx_skb;
-
- /* version/revision of the SMC911x chip */
- u16 version;
- u16 revision;
-
- /* FIFO sizes */
- int tx_fifo_kb;
- int tx_fifo_size;
- int rx_fifo_size;
- int afc_cfg;
-
- /* Contains the current active receive/phy mode */
- int ctl_rfduplx;
- int ctl_rspeed;
-
- u32 msg_enable;
- u32 phy_type;
- struct mii_if_info mii;
-
- /* work queue */
- struct work_struct phy_configure;
-
- int tx_throttle;
- spinlock_t lock;
-
- struct net_device *netdev;
-
-#ifdef SMC_USE_DMA
- /* DMA needs the physical address of the chip */
- u_long physaddr;
- struct dma_chan *rxdma;
- struct dma_chan *txdma;
- int rxdma_active;
- int txdma_active;
- struct sk_buff *current_rx_skb;
- struct sk_buff *current_tx_skb;
- struct device *dev;
-#endif
- void __iomem *base;
-#ifdef SMC_DYNAMIC_BUS_CONFIG
- struct smc911x_platdata cfg;
-#endif
-};
-
-/*
- * Define the bus width specific IO macros
- */
-
-#ifdef SMC_DYNAMIC_BUS_CONFIG
-static inline unsigned int SMC_inl(struct smc911x_local *lp, int reg)
-{
- void __iomem *ioaddr = lp->base + reg;
-
- if (lp->cfg.flags & SMC911X_USE_32BIT)
- return readl(ioaddr);
-
- if (lp->cfg.flags & SMC911X_USE_16BIT)
- return readw(ioaddr) | (readw(ioaddr + 2) << 16);
-
- BUG();
-}
-
-static inline void SMC_outl(unsigned int value, struct smc911x_local *lp,
- int reg)
-{
- void __iomem *ioaddr = lp->base + reg;
-
- if (lp->cfg.flags & SMC911X_USE_32BIT) {
- writel(value, ioaddr);
- return;
- }
-
- if (lp->cfg.flags & SMC911X_USE_16BIT) {
- writew(value & 0xffff, ioaddr);
- writew(value >> 16, ioaddr + 2);
- return;
- }
-
- BUG();
-}
-
-static inline void SMC_insl(struct smc911x_local *lp, int reg,
- void *addr, unsigned int count)
-{
- void __iomem *ioaddr = lp->base + reg;
-
- if (lp->cfg.flags & SMC911X_USE_32BIT) {
- ioread32_rep(ioaddr, addr, count);
- return;
- }
-
- if (lp->cfg.flags & SMC911X_USE_16BIT) {
- ioread16_rep(ioaddr, addr, count * 2);
- return;
- }
-
- BUG();
-}
-
-static inline void SMC_outsl(struct smc911x_local *lp, int reg,
- void *addr, unsigned int count)
-{
- void __iomem *ioaddr = lp->base + reg;
-
- if (lp->cfg.flags & SMC911X_USE_32BIT) {
- iowrite32_rep(ioaddr, addr, count);
- return;
- }
-
- if (lp->cfg.flags & SMC911X_USE_16BIT) {
- iowrite16_rep(ioaddr, addr, count * 2);
- return;
- }
-
- BUG();
-}
-#else
-#if SMC_USE_16BIT
-#define SMC_inl(lp, r) ((readw((lp)->base + (r)) & 0xFFFF) + (readw((lp)->base + (r) + 2) << 16))
-#define SMC_outl(v, lp, r) \
- do{ \
- writew(v & 0xFFFF, (lp)->base + (r)); \
- writew(v >> 16, (lp)->base + (r) + 2); \
- } while (0)
-#define SMC_insl(lp, r, p, l) ioread16_rep((short*)((lp)->base + (r)), p, l*2)
-#define SMC_outsl(lp, r, p, l) iowrite16_rep((short*)((lp)->base + (r)), p, l*2)
-
-#elif SMC_USE_32BIT
-#define SMC_inl(lp, r) readl((lp)->base + (r))
-#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r))
-#define SMC_insl(lp, r, p, l) ioread32_rep((int*)((lp)->base + (r)), p, l)
-#define SMC_outsl(lp, r, p, l) iowrite32_rep((int*)((lp)->base + (r)), p, l)
-
-#endif /* SMC_USE_16BIT */
-#endif /* SMC_DYNAMIC_BUS_CONFIG */
-
-
-#ifdef SMC_USE_PXA_DMA
-
-/*
- * Use DMA for RX and TX packets.
- */
-#include <linux/dma-mapping.h>
-
-static dma_addr_t rx_dmabuf, tx_dmabuf;
-static int rx_dmalen, tx_dmalen;
-static void smc911x_rx_dma_irq(void *data);
-static void smc911x_tx_dma_irq(void *data);
-
-#ifdef SMC_insl
-#undef SMC_insl
-#define SMC_insl(lp, r, p, l) \
- smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l)
-
-static inline void
-smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
- int reg, struct dma_chan *dma, u_char *buf, int len)
-{
- struct dma_async_tx_descriptor *tx;
-
- /* 64 bit alignment is required for memory to memory DMA */
- if ((long)buf & 4) {
- *((u32 *)buf) = SMC_inl(lp, reg);
- buf += 4;
- len--;
- }
-
- len *= 4;
- rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE);
- rx_dmalen = len;
- tx = dmaengine_prep_slave_single(dma, rx_dmabuf, rx_dmalen,
- DMA_DEV_TO_MEM, 0);
- if (tx) {
- tx->callback = smc911x_rx_dma_irq;
- tx->callback_param = lp;
- dmaengine_submit(tx);
- dma_async_issue_pending(dma);
- }
-}
-#endif
-
-#ifdef SMC_outsl
-#undef SMC_outsl
-#define SMC_outsl(lp, r, p, l) \
- smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l)
-
-static inline void
-smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
- int reg, struct dma_chan *dma, u_char *buf, int len)
-{
- struct dma_async_tx_descriptor *tx;
-
- /* 64 bit alignment is required for memory to memory DMA */
- if ((long)buf & 4) {
- SMC_outl(*((u32 *)buf), lp, reg);
- buf += 4;
- len--;
- }
-
- len *= 4;
- tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE);
- tx_dmalen = len;
- tx = dmaengine_prep_slave_single(dma, tx_dmabuf, tx_dmalen,
-					 DMA_MEM_TO_DEV, 0);
- if (tx) {
- tx->callback = smc911x_tx_dma_irq;
- tx->callback_param = lp;
- dmaengine_submit(tx);
- dma_async_issue_pending(dma);
- }
-}
-#endif
-#endif /* SMC_USE_PXA_DMA */
-
-
-/* Chip Parameters and Register Definitions */
-
-#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2)
-
-#define SMC911X_IO_EXTENT 0x100
-
-#define SMC911X_EEPROM_LEN 7
-
-/* Below are the register offsets and bit definitions
- * of the Lan911x memory space
- */
-#define RX_DATA_FIFO (0x00)
-
-#define TX_DATA_FIFO (0x20)
-#define TX_CMD_A_INT_ON_COMP_ (0x80000000)
-#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000)
-#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000)
-#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000)
-#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000)
-#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000)
-#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000)
-#define TX_CMD_A_INT_LAST_SEG_ (0x00001000)
-#define TX_CMD_A_BUF_SIZE_ (0x000007FF)
-#define TX_CMD_B_PKT_TAG_ (0xFFFF0000)
-#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000)
-#define TX_CMD_B_DISABLE_PADDING_ (0x00001000)
-#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF)
-
-#define RX_STATUS_FIFO (0x40)
-#define RX_STS_PKT_LEN_ (0x3FFF0000)
-#define RX_STS_ES_ (0x00008000)
-#define RX_STS_BCST_ (0x00002000)
-#define RX_STS_LEN_ERR_ (0x00001000)
-#define RX_STS_RUNT_ERR_ (0x00000800)
-#define RX_STS_MCAST_ (0x00000400)
-#define RX_STS_TOO_LONG_ (0x00000080)
-#define RX_STS_COLL_ (0x00000040)
-#define RX_STS_ETH_TYPE_ (0x00000020)
-#define RX_STS_WDOG_TMT_ (0x00000010)
-#define RX_STS_MII_ERR_ (0x00000008)
-#define RX_STS_DRIBBLING_ (0x00000004)
-#define RX_STS_CRC_ERR_ (0x00000002)
-#define RX_STATUS_FIFO_PEEK (0x44)
-#define TX_STATUS_FIFO (0x48)
-#define TX_STS_TAG_ (0xFFFF0000)
-#define TX_STS_ES_ (0x00008000)
-#define TX_STS_LOC_ (0x00000800)
-#define TX_STS_NO_CARR_ (0x00000400)
-#define TX_STS_LATE_COLL_ (0x00000200)
-#define TX_STS_MANY_COLL_ (0x00000100)
-#define TX_STS_COLL_CNT_ (0x00000078)
-#define TX_STS_MANY_DEFER_ (0x00000004)
-#define TX_STS_UNDERRUN_ (0x00000002)
-#define TX_STS_DEFERRED_ (0x00000001)
-#define TX_STATUS_FIFO_PEEK (0x4C)
-#define ID_REV (0x50)
-#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */
-#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */
-
-#define INT_CFG (0x54)
-#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */
-#define INT_CFG_INT_DEAS_CLR_ (0x00004000)
-#define INT_CFG_INT_DEAS_STS_ (0x00002000)
-#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */
-#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */
-#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */
-#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */
-
-#define INT_STS (0x58)
-#define INT_STS_SW_INT_ (0x80000000) /* R/WC */
-#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */
-#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */
-#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */
-#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */
-#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */
-#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */
-#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */
-#define INT_STS_PHY_INT_ (0x00040000) /* RO */
-#define INT_STS_PME_INT_ (0x00020000) /* R/WC */
-#define INT_STS_TXSO_ (0x00010000) /* R/WC */
-#define INT_STS_RWT_ (0x00008000) /* R/WC */
-#define INT_STS_RXE_ (0x00004000) /* R/WC */
-#define INT_STS_TXE_ (0x00002000) /* R/WC */
-//#define INT_STS_ERX_ (0x00001000) /* R/WC */
-#define INT_STS_TDFU_ (0x00000800) /* R/WC */
-#define INT_STS_TDFO_ (0x00000400) /* R/WC */
-#define INT_STS_TDFA_ (0x00000200) /* R/WC */
-#define INT_STS_TSFF_ (0x00000100) /* R/WC */
-#define INT_STS_TSFL_ (0x00000080) /* R/WC */
-//#define INT_STS_RXDF_ (0x00000040) /* R/WC */
-#define INT_STS_RDFO_ (0x00000040) /* R/WC */
-#define INT_STS_RDFL_ (0x00000020) /* R/WC */
-#define INT_STS_RSFF_ (0x00000010) /* R/WC */
-#define INT_STS_RSFL_ (0x00000008) /* R/WC */
-#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */
-#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */
-#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */
-
-#define INT_EN (0x5C)
-#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */
-#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */
-#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */
-#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */
-//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */
-#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */
-#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */
-#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */
-#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */
-#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */
-#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */
-#define INT_EN_RWT_EN_ (0x00008000) /* R/W */
-#define INT_EN_RXE_EN_ (0x00004000) /* R/W */
-#define INT_EN_TXE_EN_ (0x00002000) /* R/W */
-//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */
-#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */
-#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */
-#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */
-#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */
-#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */
-//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */
-#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */
-#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */
-#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */
-#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */
-#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */
-#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */
-#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */
-
-#define BYTE_TEST (0x64)
-#define FIFO_INT (0x68)
-#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */
-#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */
-#define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */
-#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */
-
-#define RX_CFG (0x6C)
-#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */
-#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */
-#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */
-#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */
-#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */
-#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */
-#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */
-//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */
-
-#define TX_CFG (0x70)
-//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */
-//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */
-#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */
-#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */
-#define TX_CFG_TXSAO_ (0x00000004) /* R/W */
-#define TX_CFG_TX_ON_ (0x00000002) /* R/W */
-#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */
-
-#define HW_CFG (0x74)
-#define HW_CFG_TTM_ (0x00200000) /* R/W */
-#define HW_CFG_SF_ (0x00100000) /* R/W */
-#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */
-#define HW_CFG_TR_ (0x00003000) /* R/W */
-#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */
-#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */
-#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */
-#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */
-#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */
-#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */
-#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */
-#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */
-#define HW_CFG_SRST_TO_ (0x00000002) /* RO */
-#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */
-
-#define RX_DP_CTRL (0x78)
-#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */
-#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */
-
-#define RX_FIFO_INF (0x7C)
-#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */
-#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */
-
-#define TX_FIFO_INF (0x80)
-#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */
-#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */
-
-#define PMT_CTRL (0x84)
-#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */
-#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */
-#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */
-#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */
-#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */
-#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */
-#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */
-#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */
-#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */
-#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */
-#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */
-#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */
-#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */
-#define PMT_CTRL_READY_ (0x00000001) /* RO */
-
-#define GPIO_CFG (0x88)
-#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */
-#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */
-#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */
-#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */
-#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */
-#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */
-#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */
-#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */
-#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */
-#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */
-#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */
-#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */
-#define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */
-#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */
-#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */
-#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */
-#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */
-#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */
-
-#define GPT_CFG (0x8C)
-#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */
-#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */
-
-#define GPT_CNT (0x90)
-#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */
-
-#define ENDIAN (0x98)
-#define FREE_RUN (0x9C)
-#define RX_DROP (0xA0)
-#define MAC_CSR_CMD (0xA4)
-#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */
-#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */
-#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */
-
-#define MAC_CSR_DATA (0xA8)
-#define AFC_CFG (0xAC)
-#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */
-#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */
-#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */
-#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */
-#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */
-#define AFC_CFG_FCADD_ (0x00000002) /* R/W */
-#define AFC_CFG_FCANY_ (0x00000001) /* R/W */
-
-#define E2P_CMD (0xB0)
-#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */
-#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */
-#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */
-#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */
-#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */
-#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */
-#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */
-#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */
-#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */
-#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */
-#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */
-#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */
-#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */
-
-#define E2P_DATA (0xB4)
-#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */
-/* end of LAN register offsets and bit definitions */
-
-/*
- ****************************************************************************
- ****************************************************************************
- * MAC Control and Status Register (Indirect Address)
- * Offset (through the MAC_CSR CMD and DATA port)
- ****************************************************************************
- ****************************************************************************
- *
- */
-#define MAC_CR (0x01) /* R/W */
-
-/* MAC_CR - MAC Control Register */
-#define MAC_CR_RXALL_ (0x80000000)
-// TODO: delete this bit? It is not described in the data sheet.
-#define MAC_CR_HBDIS_ (0x10000000)
-#define MAC_CR_RCVOWN_ (0x00800000)
-#define MAC_CR_LOOPBK_ (0x00200000)
-#define MAC_CR_FDPX_ (0x00100000)
-#define MAC_CR_MCPAS_ (0x00080000)
-#define MAC_CR_PRMS_ (0x00040000)
-#define MAC_CR_INVFILT_ (0x00020000)
-#define MAC_CR_PASSBAD_ (0x00010000)
-#define MAC_CR_HFILT_ (0x00008000)
-#define MAC_CR_HPFILT_ (0x00002000)
-#define MAC_CR_LCOLL_ (0x00001000)
-#define MAC_CR_BCAST_ (0x00000800)
-#define MAC_CR_DISRTY_ (0x00000400)
-#define MAC_CR_PADSTR_ (0x00000100)
-#define MAC_CR_BOLMT_MASK_ (0x000000C0)
-#define MAC_CR_DFCHK_ (0x00000020)
-#define MAC_CR_TXEN_ (0x00000008)
-#define MAC_CR_RXEN_ (0x00000004)
-
-#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */
-#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */
-#define HASHH (0x04) /* R/W */
-#define HASHL (0x05) /* R/W */
-
-#define MII_ACC (0x06) /* R/W */
-#define MII_ACC_PHY_ADDR_ (0x0000F800)
-#define MII_ACC_MIIRINDA_ (0x000007C0)
-#define MII_ACC_MII_WRITE_ (0x00000002)
-#define MII_ACC_MII_BUSY_ (0x00000001)
-
-#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */
-
-#define FLOW (0x08) /* R/W */
-#define FLOW_FCPT_ (0xFFFF0000)
-#define FLOW_FCPASS_ (0x00000004)
-#define FLOW_FCEN_ (0x00000002)
-#define FLOW_FCBSY_ (0x00000001)
-
-#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */
-#define VLAN1_VTI1_ (0x0000ffff)
-
-#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */
-#define VLAN2_VTI2_ (0x0000ffff)
-
-#define WUFF (0x0B) /* WO */
-
-#define WUCSR (0x0C) /* R/W */
-#define WUCSR_GUE_ (0x00000200)
-#define WUCSR_WUFR_ (0x00000040)
-#define WUCSR_MPR_ (0x00000020)
-#define WUCSR_WAKE_EN_ (0x00000004)
-#define WUCSR_MPEN_ (0x00000002)
-
-/*
- ****************************************************************************
- * Chip Specific MII Defines
- ****************************************************************************
- *
- * Phy register offsets and bit definitions
- *
- */
-
-#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */
-//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000)
-#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
-//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800)
-//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400)
-//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200)
-//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100)
-//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010)
-//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008)
-//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004)
-#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002)
-
-#define PHY_INT_SRC ((u32)29)
-#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080)
-#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040)
-#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020)
-#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010)
-#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008)
-#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004)
-#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002)
-
-#define PHY_INT_MASK ((u32)30)
-#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080)
-#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040)
-#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020)
-#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010)
-#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008)
-#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004)
-#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002)
-
-#define PHY_SPECIAL ((u32)31)
-#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000)
-#define PHY_SPECIAL_RES_ ((u16)0x0040)
-#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1)
-#define PHY_SPECIAL_SPD_ ((u16)0x001C)
-#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004)
-#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014)
-#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008)
-#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018)
-
-#define LAN911X_INTERNAL_PHY_ID (0x0007C000)
-
-/* Chip ID values */
-#define CHIP_9115 0x0115
-#define CHIP_9116 0x0116
-#define CHIP_9117 0x0117
-#define CHIP_9118 0x0118
-#define CHIP_9211 0x9211
-#define CHIP_9215 0x115A
-#define CHIP_9217 0x117A
-#define CHIP_9218 0x118A
-
-struct chip_id {
- u16 id;
- char *name;
-};
-
-static const struct chip_id chip_ids[] = {
- { CHIP_9115, "LAN9115" },
- { CHIP_9116, "LAN9116" },
- { CHIP_9117, "LAN9117" },
- { CHIP_9118, "LAN9118" },
- { CHIP_9211, "LAN9211" },
- { CHIP_9215, "LAN9215" },
- { CHIP_9217, "LAN9217" },
- { CHIP_9218, "LAN9218" },
- { 0, NULL },
-};
-
-#define IS_REV_A(x) ((x & 0xFFFF)==0)
-
-/*
- * Macros to abstract register access according to the data bus
- * capabilities. Please use those and not the in/out primitives.
- */
-/* FIFO read/write macros */
-#define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 )
-#define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 )
-#define SMC_SET_TX_FIFO(lp, x) SMC_outl( x, lp, TX_DATA_FIFO )
-#define SMC_GET_RX_FIFO(lp) SMC_inl( lp, RX_DATA_FIFO )
-
-
-/* I/O mapped register read/write macros */
-#define SMC_GET_TX_STS_FIFO(lp) SMC_inl( lp, TX_STATUS_FIFO )
-#define SMC_GET_RX_STS_FIFO(lp) SMC_inl( lp, RX_STATUS_FIFO )
-#define SMC_GET_RX_STS_FIFO_PEEK(lp) SMC_inl( lp, RX_STATUS_FIFO_PEEK )
-#define SMC_GET_PN(lp) (SMC_inl( lp, ID_REV ) >> 16)
-#define SMC_GET_REV(lp) (SMC_inl( lp, ID_REV ) & 0xFFFF)
-#define SMC_GET_IRQ_CFG(lp) SMC_inl( lp, INT_CFG )
-#define SMC_SET_IRQ_CFG(lp, x) SMC_outl( x, lp, INT_CFG )
-#define SMC_GET_INT(lp) SMC_inl( lp, INT_STS )
-#define SMC_ACK_INT(lp, x) SMC_outl( x, lp, INT_STS )
-#define SMC_GET_INT_EN(lp) SMC_inl( lp, INT_EN )
-#define SMC_SET_INT_EN(lp, x) SMC_outl( x, lp, INT_EN )
-#define SMC_GET_BYTE_TEST(lp) SMC_inl( lp, BYTE_TEST )
-#define SMC_SET_BYTE_TEST(lp, x) SMC_outl( x, lp, BYTE_TEST )
-#define SMC_GET_FIFO_INT(lp) SMC_inl( lp, FIFO_INT )
-#define SMC_SET_FIFO_INT(lp, x) SMC_outl( x, lp, FIFO_INT )
-#define SMC_SET_FIFO_TDA(lp, x) \
- do { \
- unsigned long __flags; \
- int __mask; \
- local_irq_save(__flags); \
- __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<24); \
- SMC_SET_FIFO_INT( (lp), __mask | (x)<<24 ); \
- local_irq_restore(__flags); \
- } while (0)
-#define SMC_SET_FIFO_TSL(lp, x) \
- do { \
- unsigned long __flags; \
- int __mask; \
- local_irq_save(__flags); \
- __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<16); \
- SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<16)); \
- local_irq_restore(__flags); \
- } while (0)
-#define SMC_SET_FIFO_RSA(lp, x) \
- do { \
- unsigned long __flags; \
- int __mask; \
- local_irq_save(__flags); \
- __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<8); \
- SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<8)); \
- local_irq_restore(__flags); \
- } while (0)
-#define SMC_SET_FIFO_RSL(lp, x) \
- do { \
- unsigned long __flags; \
- int __mask; \
- local_irq_save(__flags); \
- __mask = SMC_GET_FIFO_INT((lp)) & ~0xFF; \
- SMC_SET_FIFO_INT( (lp),__mask | ((x) & 0xFF)); \
- local_irq_restore(__flags); \
- } while (0)
-#define SMC_GET_RX_CFG(lp) SMC_inl( lp, RX_CFG )
-#define SMC_SET_RX_CFG(lp, x) SMC_outl( x, lp, RX_CFG )
-#define SMC_GET_TX_CFG(lp) SMC_inl( lp, TX_CFG )
-#define SMC_SET_TX_CFG(lp, x) SMC_outl( x, lp, TX_CFG )
-#define SMC_GET_HW_CFG(lp) SMC_inl( lp, HW_CFG )
-#define SMC_SET_HW_CFG(lp, x) SMC_outl( x, lp, HW_CFG )
-#define SMC_GET_RX_DP_CTRL(lp) SMC_inl( lp, RX_DP_CTRL )
-#define SMC_SET_RX_DP_CTRL(lp, x) SMC_outl( x, lp, RX_DP_CTRL )
-#define SMC_GET_PMT_CTRL(lp) SMC_inl( lp, PMT_CTRL )
-#define SMC_SET_PMT_CTRL(lp, x) SMC_outl( x, lp, PMT_CTRL )
-#define SMC_GET_GPIO_CFG(lp) SMC_inl( lp, GPIO_CFG )
-#define SMC_SET_GPIO_CFG(lp, x) SMC_outl( x, lp, GPIO_CFG )
-#define SMC_GET_RX_FIFO_INF(lp) SMC_inl( lp, RX_FIFO_INF )
-#define SMC_SET_RX_FIFO_INF(lp, x) SMC_outl( x, lp, RX_FIFO_INF )
-#define SMC_GET_TX_FIFO_INF(lp) SMC_inl( lp, TX_FIFO_INF )
-#define SMC_SET_TX_FIFO_INF(lp, x) SMC_outl( x, lp, TX_FIFO_INF )
-#define SMC_GET_GPT_CFG(lp) SMC_inl( lp, GPT_CFG )
-#define SMC_SET_GPT_CFG(lp, x) SMC_outl( x, lp, GPT_CFG )
-#define SMC_GET_RX_DROP(lp) SMC_inl( lp, RX_DROP )
-#define SMC_SET_RX_DROP(lp, x) SMC_outl( x, lp, RX_DROP )
-#define SMC_GET_MAC_CMD(lp) SMC_inl( lp, MAC_CSR_CMD )
-#define SMC_SET_MAC_CMD(lp, x) SMC_outl( x, lp, MAC_CSR_CMD )
-#define SMC_GET_MAC_DATA(lp) SMC_inl( lp, MAC_CSR_DATA )
-#define SMC_SET_MAC_DATA(lp, x) SMC_outl( x, lp, MAC_CSR_DATA )
-#define SMC_GET_AFC_CFG(lp) SMC_inl( lp, AFC_CFG )
-#define SMC_SET_AFC_CFG(lp, x) SMC_outl( x, lp, AFC_CFG )
-#define SMC_GET_E2P_CMD(lp) SMC_inl( lp, E2P_CMD )
-#define SMC_SET_E2P_CMD(lp, x) SMC_outl( x, lp, E2P_CMD )
-#define SMC_GET_E2P_DATA(lp) SMC_inl( lp, E2P_DATA )
-#define SMC_SET_E2P_DATA(lp, x) SMC_outl( x, lp, E2P_DATA )
-
-/* MAC register read/write macros */
-#define SMC_GET_MAC_CSR(lp,a,v) \
- do { \
- while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
- SMC_SET_MAC_CMD((lp),MAC_CSR_CMD_CSR_BUSY_ | \
- MAC_CSR_CMD_R_NOT_W_ | (a) ); \
- while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
- v = SMC_GET_MAC_DATA((lp)); \
- } while (0)
-#define SMC_SET_MAC_CSR(lp,a,v) \
- do { \
- while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
- SMC_SET_MAC_DATA((lp), v); \
- SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_CSR_BUSY_ | (a) ); \
- while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
- } while (0)
-#define SMC_GET_MAC_CR(lp, x) SMC_GET_MAC_CSR( (lp), MAC_CR, x )
-#define SMC_SET_MAC_CR(lp, x) SMC_SET_MAC_CSR( (lp), MAC_CR, x )
-#define SMC_GET_ADDRH(lp, x) SMC_GET_MAC_CSR( (lp), ADDRH, x )
-#define SMC_SET_ADDRH(lp, x) SMC_SET_MAC_CSR( (lp), ADDRH, x )
-#define SMC_GET_ADDRL(lp, x) SMC_GET_MAC_CSR( (lp), ADDRL, x )
-#define SMC_SET_ADDRL(lp, x) SMC_SET_MAC_CSR( (lp), ADDRL, x )
-#define SMC_GET_HASHH(lp, x) SMC_GET_MAC_CSR( (lp), HASHH, x )
-#define SMC_SET_HASHH(lp, x) SMC_SET_MAC_CSR( (lp), HASHH, x )
-#define SMC_GET_HASHL(lp, x) SMC_GET_MAC_CSR( (lp), HASHL, x )
-#define SMC_SET_HASHL(lp, x) SMC_SET_MAC_CSR( (lp), HASHL, x )
-#define SMC_GET_MII_ACC(lp, x) SMC_GET_MAC_CSR( (lp), MII_ACC, x )
-#define SMC_SET_MII_ACC(lp, x) SMC_SET_MAC_CSR( (lp), MII_ACC, x )
-#define SMC_GET_MII_DATA(lp, x) SMC_GET_MAC_CSR( (lp), MII_DATA, x )
-#define SMC_SET_MII_DATA(lp, x) SMC_SET_MAC_CSR( (lp), MII_DATA, x )
-#define SMC_GET_FLOW(lp, x) SMC_GET_MAC_CSR( (lp), FLOW, x )
-#define SMC_SET_FLOW(lp, x) SMC_SET_MAC_CSR( (lp), FLOW, x )
-#define SMC_GET_VLAN1(lp, x) SMC_GET_MAC_CSR( (lp), VLAN1, x )
-#define SMC_SET_VLAN1(lp, x) SMC_SET_MAC_CSR( (lp), VLAN1, x )
-#define SMC_GET_VLAN2(lp, x) SMC_GET_MAC_CSR( (lp), VLAN2, x )
-#define SMC_SET_VLAN2(lp, x) SMC_SET_MAC_CSR( (lp), VLAN2, x )
-#define SMC_SET_WUFF(lp, x) SMC_SET_MAC_CSR( (lp), WUFF, x )
-#define SMC_GET_WUCSR(lp, x) SMC_GET_MAC_CSR( (lp), WUCSR, x )
-#define SMC_SET_WUCSR(lp, x) SMC_SET_MAC_CSR( (lp), WUCSR, x )
-
-/* PHY register read/write macros */
-#define SMC_GET_MII(lp,a,phy,v) \
- do { \
- u32 __v; \
- do { \
- SMC_GET_MII_ACC((lp), __v); \
- } while ( __v & MII_ACC_MII_BUSY_ ); \
- SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \
- MII_ACC_MII_BUSY_); \
- do { \
- SMC_GET_MII_ACC( (lp), __v); \
- } while ( __v & MII_ACC_MII_BUSY_ ); \
- SMC_GET_MII_DATA((lp), v); \
- } while (0)
-#define SMC_SET_MII(lp,a,phy,v) \
- do { \
- u32 __v; \
- do { \
- SMC_GET_MII_ACC((lp), __v); \
- } while ( __v & MII_ACC_MII_BUSY_ ); \
- SMC_SET_MII_DATA((lp), v); \
- SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \
- MII_ACC_MII_BUSY_ | \
- MII_ACC_MII_WRITE_ ); \
- do { \
- SMC_GET_MII_ACC((lp), __v); \
- } while ( __v & MII_ACC_MII_BUSY_ ); \
- } while (0)
-#define SMC_GET_PHY_BMCR(lp,phy,x) SMC_GET_MII( (lp), MII_BMCR, phy, x )
-#define SMC_SET_PHY_BMCR(lp,phy,x) SMC_SET_MII( (lp), MII_BMCR, phy, x )
-#define SMC_GET_PHY_BMSR(lp,phy,x) SMC_GET_MII( (lp), MII_BMSR, phy, x )
-#define SMC_GET_PHY_ID1(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID1, phy, x )
-#define SMC_GET_PHY_ID2(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID2, phy, x )
-#define SMC_GET_PHY_MII_ADV(lp,phy,x) SMC_GET_MII( (lp), MII_ADVERTISE, phy, x )
-#define SMC_SET_PHY_MII_ADV(lp,phy,x) SMC_SET_MII( (lp), MII_ADVERTISE, phy, x )
-#define SMC_GET_PHY_MII_LPA(lp,phy,x) SMC_GET_MII( (lp), MII_LPA, phy, x )
-#define SMC_SET_PHY_MII_LPA(lp,phy,x) SMC_SET_MII( (lp), MII_LPA, phy, x )
-#define SMC_GET_PHY_CTRL_STS(lp,phy,x) SMC_GET_MII( (lp), PHY_MODE_CTRL_STS, phy, x )
-#define SMC_SET_PHY_CTRL_STS(lp,phy,x) SMC_SET_MII( (lp), PHY_MODE_CTRL_STS, phy, x )
-#define SMC_GET_PHY_INT_SRC(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_SRC, phy, x )
-#define SMC_SET_PHY_INT_SRC(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_SRC, phy, x )
-#define SMC_GET_PHY_INT_MASK(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_MASK, phy, x )
-#define SMC_SET_PHY_INT_MASK(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_MASK, phy, x )
-#define SMC_GET_PHY_SPECIAL(lp,phy,x) SMC_GET_MII( (lp), PHY_SPECIAL, phy, x )
-
-
-
-/* Misc read/write macros */
-
-#ifndef SMC_GET_MAC_ADDR
-#define SMC_GET_MAC_ADDR(lp, addr) \
- do { \
- unsigned int __v; \
- \
- SMC_GET_MAC_CSR((lp), ADDRL, __v); \
- addr[0] = __v; addr[1] = __v >> 8; \
- addr[2] = __v >> 16; addr[3] = __v >> 24; \
- SMC_GET_MAC_CSR((lp), ADDRH, __v); \
- addr[4] = __v; addr[5] = __v >> 8; \
- } while (0)
-#endif
-
-#define SMC_SET_MAC_ADDR(lp, addr) \
- do { \
- SMC_SET_MAC_CSR((lp), ADDRL, \
- addr[0] | \
- (addr[1] << 8) | \
- (addr[2] << 16) | \
- (addr[3] << 24)); \
- SMC_SET_MAC_CSR((lp), ADDRH, addr[4]|(addr[5] << 8));\
- } while (0)
-
-
-#define SMC_WRITE_EEPROM_CMD(lp, cmd, addr) \
- do { \
- while (SMC_GET_E2P_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
- SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_R_NOT_W_ | a ); \
- while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
- } while (0)
-
-#endif /* _SMC911X_H_ */
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index d2c6a5dfdc0e..492c39c08af1 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1508,16 +1508,16 @@ static void ave_get_stats64(struct net_device *ndev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+ start = u64_stats_fetch_begin(&priv->stats_rx.syncp);
stats->rx_packets = priv->stats_rx.packets;
stats->rx_bytes = priv->stats_rx.bytes;
- } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+ } while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+ start = u64_stats_fetch_begin(&priv->stats_tx.syncp);
stats->tx_packets = priv->stats_tx.packets;
stats->tx_bytes = priv->stats_tx.bytes;
- } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+ } while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start));
stats->rx_errors = priv->stats_rx.errors;
stats->tx_errors = priv->stats_tx.errors;
@@ -1766,12 +1766,6 @@ static int ave_resume(struct device *dev)
wol.wolopts = priv->wolopts;
__ave_ethtool_set_wol(ndev, &wol);
- if (ndev->phydev) {
- ret = phy_resume(ndev->phydev);
- if (ret)
- return ret;
- }
-
if (netif_running(ndev)) {
ret = ave_open(ndev);
netif_device_attach(ndev);
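
The sni_ave.c hunks above simply move from the _irq-suffixed stats helpers to the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() pair; the read side is still the familiar retry loop around a writer sequence count. The standalone C sketch below illustrates that reader pattern only; the struct, helper names and the single-threaded demo are illustrative, not the kernel API (in particular it omits the memory barriers the real helpers provide).

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a u64_stats-style counter block: seq is even
 * when the counters are stable and odd while a writer is updating them. */
struct stats {
	volatile unsigned int seq;
	uint64_t packets;
	uint64_t bytes;
};

static unsigned int fetch_begin(const struct stats *s)
{
	unsigned int seq;

	do {
		seq = s->seq;		/* wait out an in-flight update */
	} while (seq & 1);
	return seq;
}

static int fetch_retry(const struct stats *s, unsigned int start)
{
	return s->seq != start;		/* sequence moved: snapshot is stale */
}

int main(void)
{
	struct stats s = { .seq = 0, .packets = 42, .bytes = 64000 };
	uint64_t packets, bytes;
	unsigned int start;

	do {
		start = fetch_begin(&s);
		packets = s.packets;
		bytes = s.bytes;
	} while (fetch_retry(&s, start));

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)packets, (unsigned long long)bytes);
	return 0;
}
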
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 31ff35174034..f77511fe4e87 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -235,6 +235,15 @@ config DWMAC_INTEL_PLAT
the stmmac device driver. This driver is used for the Intel Keem Bay
SoC.
+config DWMAC_TEGRA
+ tristate "NVIDIA Tegra MGBE support"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ help
+	  This selects the Multi-Gigabit Ethernet (MGBE) controller found on
+	  NVIDIA Tegra SoC devices. This driver provides the glue layer on
+	  top of the stmmac driver required by these devices.
+
config DWMAC_VISCONTI
tristate "Toshiba Visconti DWMAC support"
default ARCH_VISCONTI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index d4e12e9ace4f..057e4bab5c08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
+obj-$(CONFIG_DWMAC_TEGRA) += dwmac-tegra.o
obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
new file mode 100644
index 000000000000..bdf990cf2f31
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/stmmac.h>
+#include <linux/clk.h>
+
+#include "stmmac_platform.h"
+
+static const char *const mgbe_clks[] = {
+ "rx-pcs", "tx", "tx-pcs", "mac-divider", "mac", "mgbe", "ptp-ref", "mac"
+};
+
+struct tegra_mgbe {
+ struct device *dev;
+
+ struct clk_bulk_data *clks;
+
+ struct reset_control *rst_mac;
+ struct reset_control *rst_pcs;
+
+ void __iomem *hv;
+ void __iomem *regs;
+ void __iomem *xpcs;
+
+ struct mii_bus *mii;
+};
+
+#define XPCS_WRAP_UPHY_RX_CONTROL 0x801c
+#define XPCS_WRAP_UPHY_RX_CONTROL_RX_SW_OVRD BIT(31)
+#define XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY BIT(10)
+#define XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET BIT(9)
+#define XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN BIT(8)
+#define XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP (BIT(7) | BIT(6))
+#define XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ BIT(5)
+#define XPCS_WRAP_UPHY_RX_CONTROL_RX_IDDQ BIT(4)
+#define XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN BIT(0)
+#define XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8020
+#define XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN BIT(0)
+#define XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN BIT(2)
+#define XPCS_WRAP_UPHY_STATUS 0x8044
+#define XPCS_WRAP_UPHY_STATUS_TX_P_UP BIT(0)
+#define XPCS_WRAP_IRQ_STATUS 0x8050
+#define XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS BIT(6)
+
+#define XPCS_REG_ADDR_SHIFT 10
+#define XPCS_REG_ADDR_MASK 0x1fff
+#define XPCS_ADDR 0x3fc
+
+#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704
+#define MAC_SBD_INTR BIT(2)
+#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400
+#define MGBE_SID 0x6
+
+static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
+{
+ struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);
+ int err;
+
+ err = stmmac_suspend(dev);
+ if (err)
+ return err;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
+
+ return reset_control_assert(mgbe->rst_mac);
+}
+
+static int __maybe_unused tegra_mgbe_resume(struct device *dev)
+{
+ struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);
+ u32 value;
+ int err;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
+ if (err < 0)
+ return err;
+
+ err = reset_control_deassert(mgbe->rst_mac);
+ if (err < 0)
+ return err;
+
+ /* Enable common interrupt at wrapper level */
+ writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
+
+ /* Program SID */
+ writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
+ if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
+ value |= XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
+ }
+
+ err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL, value,
+ (value & XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) == 0,
+ 500, 500 * 2000);
+ if (err < 0) {
+ dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
+ clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
+ return err;
+ }
+
+ err = stmmac_resume(dev);
+ if (err < 0)
+ clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
+
+ return err;
+}
+
+static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_data)
+{
+ struct tegra_mgbe *mgbe = (struct tegra_mgbe *)mgbe_data;
+ u32 value;
+ int err;
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_SW_OVRD;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_IDDQ;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL, value,
+ (value & XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN) == 0,
+ 1000, 1000 * 2000);
+ if (err < 0) {
+ dev_err(mgbe->dev, "timeout waiting for RX calibration to become enabled\n");
+ return err;
+ }
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
+ value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
+ 500, 500 * 2000);
+ if (err < 0) {
+ dev_err(mgbe->dev, "timeout waiting for link to become ready\n");
+ return err;
+ }
+
+ /* clear status */
+ writel(value, mgbe->xpcs + XPCS_WRAP_IRQ_STATUS);
+
+ return 0;
+}
+
+static void mgbe_uphy_lane_bringup_serdes_down(struct net_device *ndev, void *mgbe_data)
+{
+ struct tegra_mgbe *mgbe = (struct tegra_mgbe *)mgbe_data;
+ u32 value;
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_SW_OVRD;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_IDDQ;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+}
+
+static int tegra_mgbe_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res;
+ struct tegra_mgbe *mgbe;
+ int irq, err, i;
+ u32 value;
+
+ mgbe = devm_kzalloc(&pdev->dev, sizeof(*mgbe), GFP_KERNEL);
+ if (!mgbe)
+ return -ENOMEM;
+
+ mgbe->dev = &pdev->dev;
+
+ memset(&res, 0, sizeof(res));
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mgbe->hv = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
+ if (IS_ERR(mgbe->hv))
+ return PTR_ERR(mgbe->hv);
+
+ mgbe->regs = devm_platform_ioremap_resource_byname(pdev, "mac");
+ if (IS_ERR(mgbe->regs))
+ return PTR_ERR(mgbe->regs);
+
+ mgbe->xpcs = devm_platform_ioremap_resource_byname(pdev, "xpcs");
+ if (IS_ERR(mgbe->xpcs))
+ return PTR_ERR(mgbe->xpcs);
+
+ res.addr = mgbe->regs;
+ res.irq = irq;
+
+	mgbe->clks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(mgbe_clks),
+				  sizeof(*mgbe->clks), GFP_KERNEL);
+ if (!mgbe->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(mgbe_clks); i++)
+ mgbe->clks[i].id = mgbe_clks[i];
+
+ err = devm_clk_bulk_get(mgbe->dev, ARRAY_SIZE(mgbe_clks), mgbe->clks);
+ if (err < 0)
+ return err;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
+ if (err < 0)
+ return err;
+
+ /* Perform MAC reset */
+ mgbe->rst_mac = devm_reset_control_get(&pdev->dev, "mac");
+ if (IS_ERR(mgbe->rst_mac)) {
+ err = PTR_ERR(mgbe->rst_mac);
+ goto disable_clks;
+ }
+
+ err = reset_control_assert(mgbe->rst_mac);
+ if (err < 0)
+ goto disable_clks;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_deassert(mgbe->rst_mac);
+ if (err < 0)
+ goto disable_clks;
+
+ /* Perform PCS reset */
+ mgbe->rst_pcs = devm_reset_control_get(&pdev->dev, "pcs");
+ if (IS_ERR(mgbe->rst_pcs)) {
+ err = PTR_ERR(mgbe->rst_pcs);
+ goto disable_clks;
+ }
+
+ err = reset_control_assert(mgbe->rst_pcs);
+ if (err < 0)
+ goto disable_clks;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_deassert(mgbe->rst_pcs);
+ if (err < 0)
+ goto disable_clks;
+
+ plat = stmmac_probe_config_dt(pdev, res.mac);
+ if (IS_ERR(plat)) {
+ err = PTR_ERR(plat);
+ goto disable_clks;
+ }
+
+ plat->has_xgmac = 1;
+ plat->tso_en = 1;
+ plat->pmt = 1;
+ plat->bsp_priv = mgbe;
+
+ if (!plat->mdio_node)
+ plat->mdio_node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+
+ if (!plat->mdio_bus_data) {
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data) {
+ err = -ENOMEM;
+ goto remove;
+ }
+ }
+
+ plat->mdio_bus_data->needs_reset = true;
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
+ if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
+ value |= XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
+ }
+
+ err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL, value,
+ (value & XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) == 0,
+ 500, 500 * 2000);
+ if (err < 0) {
+ dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
+ goto remove;
+ }
+
+ plat->serdes_powerup = mgbe_uphy_lane_bringup_serdes_up;
+ plat->serdes_powerdown = mgbe_uphy_lane_bringup_serdes_down;
+
+ /* Tx FIFO Size - 128KB */
+ plat->tx_fifo_size = 131072;
+ /* Rx FIFO Size - 192KB */
+ plat->rx_fifo_size = 196608;
+
+ /* Enable common interrupt at wrapper level */
+ writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
+
+ /* Program SID */
+ writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+
+ plat->serdes_up_after_phy_linkup = 1;
+
+ err = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (err < 0)
+ goto remove;
+
+ return 0;
+
+remove:
+ stmmac_remove_config_dt(pdev, plat);
+disable_clks:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
+
+ return err;
+}
+
+static int tegra_mgbe_remove(struct platform_device *pdev)
+{
+ struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(&pdev->dev);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
+
+ stmmac_pltfr_remove(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_mgbe_match[] = {
+ { .compatible = "nvidia,tegra234-mgbe", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_mgbe_match);
+
+static SIMPLE_DEV_PM_OPS(tegra_mgbe_pm_ops, tegra_mgbe_suspend, tegra_mgbe_resume);
+
+static struct platform_driver tegra_mgbe_driver = {
+ .probe = tegra_mgbe_probe,
+ .remove = tegra_mgbe_remove,
+ .driver = {
+ .name = "tegra-mgbe",
+ .pm = &tegra_mgbe_pm_ops,
+ .of_match_table = tegra_mgbe_match,
+ },
+};
+module_platform_driver(tegra_mgbe_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra MGBE driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 71dad409f78b..ccd49346d3b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -331,9 +331,7 @@ enum power_event {
#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
-#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
-#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
-#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
+#define MTL_RXQ_DMA_QXMDMACH_MASK(x) (0xf << 8 * (x))
#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
#define MTL_CHAN_BASE_ADDR 0x00000d00
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index e5cfde1cbd5c..8c7a0b7c9952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -214,26 +214,17 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
void __iomem *ioaddr = hw->pcsr;
u32 value;
- if (queue < 4)
+ if (queue < 4) {
value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
- else
- value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
-
- if (queue == 0 || queue == 4) {
- value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
- value |= MTL_RXQ_DMA_Q04MDMACH(chan);
- } else if (queue > 4) {
- value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
- value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
- } else {
value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
- }
-
- if (queue < 4)
writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
- else
+ } else {
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
+ }
}
static void dwmac4_config_cbs(struct mac_device_info *hw,
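
The dwmac4_map_mtl_dma() rework above drops the special-cased handling of queues 0 and 4: every RX queue now owns a 4-bit channel field placed every 8 bits of MTL_RXQ_DMA_MAP0 (queues 0-3) or MTL_RXQ_DMA_MAP1 (queues 4-7), so a single mask/shift pair covers all of them. The short self-contained program below works through that read-modify-write; the macro and function names are illustrative and the register is just a local variable.

#include <stdint.h>
#include <stdio.h>

/* 4-bit channel field for queue q, one field every 8 bits. */
#define QXMDMACH_MASK(q)	(0xfu << (8 * (q)))
#define QXMDMACH(chan, q)	((uint32_t)(chan) << (8 * (q)))

static uint32_t map_queue(uint32_t reg, unsigned int queue, unsigned int chan)
{
	unsigned int q = queue & 3;	/* position within MAP0 or MAP1 */

	reg &= ~QXMDMACH_MASK(q);	/* clear the old channel for this queue */
	reg |= QXMDMACH(chan, q);	/* program the new one */
	return reg;
}

int main(void)
{
	uint32_t map0 = 0;

	map0 = map_queue(map0, 0, 2);	/* queue 0 -> channel 2 */
	map0 = map_queue(map0, 2, 1);	/* queue 2 -> channel 1 */
	printf("MTL_RXQ_DMA_MAP0 = 0x%08x\n", (unsigned int)map0); /* 0x00010002 */
	return 0;
}
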
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 23ec0a9e396c..f36590d0c830 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -988,6 +988,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 old_ctrl, ctrl;
+ if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
+ priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
+
old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl = old_ctrl & ~priv->hw->link.speed_mask;
@@ -1088,7 +1091,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
@@ -3810,7 +3812,7 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
- if (priv->plat->serdes_powerup) {
+ if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: Serdes powerup failed\n",
@@ -7519,7 +7521,7 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
- if (priv->plat->serdes_powerup) {
+ if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(ndev,
priv->plat->bsp_priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 4d11980dcd64..fc06ddeac0d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -15,29 +15,20 @@
* stmmac_adjust_freq
*
* @ptp: pointer to ptp_clock_info structure
- * @ppb: desired period change in parts ber billion
+ * @scaled_ppm: desired period change in scaled parts per million
*
* Description: this function will adjust the frequency of hardware clock.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
+static int stmmac_adjust_freq(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
unsigned long flags;
- u32 diff, addend;
- int neg_adj = 0;
- u64 adj;
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
+ u32 addend;
- addend = priv->default_addend;
- adj = addend;
- adj *= ppb;
- diff = div_u64(adj, 1000000000ULL);
- addend = neg_adj ? (addend - diff) : (addend + diff);
+ addend = adjust_by_scaled_ppm(priv->default_addend, scaled_ppm);
write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_config_addend(priv, priv->ptpaddr, addend);
@@ -269,7 +260,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.n_per_out = 0, /* will be overwritten in stmmac_ptp_register */
.n_pins = 0,
.pps = 0,
- .adjfreq = stmmac_adjust_freq,
+ .adjfine = stmmac_adjust_freq,
.adjtime = stmmac_adjust_time,
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
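
Switching stmmac from .adjfreq to .adjfine means the callback now receives scaled_ppm, i.e. parts per million with a 16-bit binary fraction, and hands the arithmetic to the generic adjust_by_scaled_ppm() helper. The addend scaling it performs is the same idea as the removed open-coded version: delta = addend * |scaled_ppm| / (1,000,000 * 2^16), added or subtracted depending on sign. The standalone sketch below only illustrates that math; it is not the kernel helper itself.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Scale a clock addend by scaled_ppm (ppm with a 16-bit binary fraction). */
static uint32_t adjust_addend(uint32_t addend, long scaled_ppm)
{
	uint64_t diff;

	diff = (uint64_t)addend * (uint64_t)labs(scaled_ppm);
	diff /= 1000000ULL << 16;	/* ppm * 2^16 in the denominator */

	return scaled_ppm < 0 ? addend - (uint32_t)diff
			      : addend + (uint32_t)diff;
}

int main(void)
{
	uint32_t addend = 0x80000000u;	/* example default addend */
	long scaled_ppm = 10 << 16;	/* +10 ppm */

	printf("old 0x%08x new 0x%08x\n", (unsigned int)addend,
	       (unsigned int)adjust_addend(addend, scaled_ppm));
	return 0;
}
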
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0aca193d9550..4ef05bad4613 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -90,8 +90,6 @@
#include <linux/uaccess.h>
#include <linux/jiffies.h>
-#define cas_page_map(x) kmap_atomic((x))
-#define cas_page_unmap(x) kunmap_atomic((x))
#define CAS_NCPUS num_online_cpus()
#define cas_skb_release(x) netif_rx(x)
@@ -1915,7 +1913,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
int off, swivel = RX_SWIVEL_OFF_VAL;
struct cas_page *page;
struct sk_buff *skb;
- void *addr, *crcaddr;
+ void *crcaddr;
__sum16 csum;
char *p;
@@ -1936,7 +1934,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb_reserve(skb, swivel);
p = skb->data;
- addr = crcaddr = NULL;
+ crcaddr = NULL;
if (hlen) { /* always copy header pages */
i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
@@ -1948,12 +1946,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
i += cp->crc_size;
dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
i, DMA_FROM_DEVICE);
- addr = cas_page_map(page->buffer);
- memcpy(p, addr + off, i);
+ memcpy(p, page_address(page->buffer) + off, i);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr + off, i,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
RX_USED_ADD(page, 0x100);
p += hlen;
swivel = 0;
@@ -1984,12 +1980,11 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
/* make sure we always copy a header */
swivel = 0;
if (p == (char *) skb->data) { /* not split */
- addr = cas_page_map(page->buffer);
- memcpy(p, addr + off, RX_COPY_MIN);
+ memcpy(p, page_address(page->buffer) + off,
+ RX_COPY_MIN);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr + off, i,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
off += RX_COPY_MIN;
swivel = RX_COPY_MIN;
RX_USED_ADD(page, cp->mtu_stride);
@@ -2036,10 +2031,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
RX_USED_ADD(page, hlen + cp->crc_size);
}
- if (cp->crc_size) {
- addr = cas_page_map(page->buffer);
- crcaddr = addr + off + hlen;
- }
+ if (cp->crc_size)
+ crcaddr = page_address(page->buffer) + off + hlen;
} else {
/* copying packet */
@@ -2061,12 +2054,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
i += cp->crc_size;
dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
i, DMA_FROM_DEVICE);
- addr = cas_page_map(page->buffer);
- memcpy(p, addr + off, i);
+ memcpy(p, page_address(page->buffer) + off, i);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr + off, i,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
if (p == (char *) skb->data) /* not split */
RX_USED_ADD(page, cp->mtu_stride);
else
@@ -2081,20 +2072,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
page->dma_addr,
dlen + cp->crc_size,
DMA_FROM_DEVICE);
- addr = cas_page_map(page->buffer);
- memcpy(p, addr, dlen + cp->crc_size);
+ memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr,
dlen + cp->crc_size,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
RX_USED_ADD(page, dlen + cp->crc_size);
}
end_copy_pkt:
- if (cp->crc_size) {
- addr = NULL;
+ if (cp->crc_size)
crcaddr = skb->data + alloclen;
- }
+
skb_put(skb, alloclen);
}
@@ -2103,8 +2091,6 @@ end_copy_pkt:
/* checksum includes FCS. strip it out. */
csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
csum_unfold(csum)));
- if (addr)
- cas_page_unmap(addr);
}
skb->protocol = eth_type_trans(skb, cp->dev);
if (skb->protocol == htons(ETH_P_IP)) {
@@ -2793,18 +2779,14 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
if (unlikely(tabort)) {
- void *addr;
-
/* NOTE: len is always > tabort */
cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl, 0);
entry = TX_DESC_NEXT(ring, entry);
-
- addr = cas_page_map(skb_frag_page(fragp));
- memcpy(tx_tiny_buf(cp, ring, entry),
- addr + skb_frag_off(fragp) + len - tabort,
- tabort);
- cas_page_unmap(addr);
+ memcpy_from_page(tx_tiny_buf(cp, ring, entry),
+ skb_frag_page(fragp),
+ skb_frag_off(fragp) + len - tabort,
+ tabort);
mapping = tx_tiny_map(cp, ring, entry, tentry);
len = tabort;
}
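
The cassini.c conversion above removes the kmap_atomic()/kunmap_atomic() pairs: copies from RX pages go through page_address() (reasonable on the assumption that these buffers are not highmem pages), and the TX tiny-buffer copy uses memcpy_from_page(). That helper is essentially a short-lived local mapping wrapped around memcpy(); the kernel-style sketch below approximates its behaviour (the real helper in linux/highmem.h also sanity-checks offset + len against PAGE_SIZE) and is shown for illustration only.

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy len bytes starting at offset within a page into a kernel buffer,
 * holding a local mapping only for the duration of the copy. */
static inline void sketch_memcpy_from_page(char *to, struct page *page,
					    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}
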
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 80fde5f06fce..a6211b95ed17 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1085,13 +1085,13 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
u8 *vaddr;
if (nc < ncookies) {
- vaddr = kmap_atomic(skb_frag_page(f));
+ vaddr = kmap_local_page(skb_frag_page(f));
blen = skb_frag_size(f);
blen += 8 - (blen & 7);
err = ldc_map_single(lp, vaddr + skb_frag_off(f),
blen, cookies + nc, ncookies - nc,
map_perm);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
} else {
err = -EMSGSIZE;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index b3b0ba842541..de112ab3195c 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/rtnetlink.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
@@ -555,11 +556,26 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret, i;
+ u32 reg;
ret = pm_runtime_resume_and_get(common->dev);
if (ret < 0)
return ret;
+ /* Idle MAC port */
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+ cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ /* soft reset MAC */
+ cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
+ mdelay(1);
+ reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
+ if (reg) {
+ dev_err(common->dev, "soft RESET didn't complete\n");
+ return -ETIMEDOUT;
+ }
+
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
if (ret) {
@@ -1362,12 +1378,12 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -1380,13 +1396,6 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
-static struct devlink_port *am65_cpsw_ndo_get_devlink_port(struct net_device *ndev)
-{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
-
- return &port->devlink_port;
-}
-
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
@@ -1400,7 +1409,6 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
- .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
@@ -1479,7 +1487,6 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
}
static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
- .validate = phylink_generic_validate,
.mac_config = am65_cpsw_nuss_mac_config,
.mac_link_down = am65_cpsw_nuss_mac_link_down,
.mac_link_up = am65_cpsw_nuss_mac_link_up,
@@ -1542,6 +1549,32 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
}
}
+static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ int i, ret = 0;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
+
+ ret = devm_request_irq(dev, tx_chn->irq,
+ am65_cpsw_nuss_tx_irq,
+ IRQF_TRIGGER_HIGH,
+ tx_chn->tx_chn_name, tx_chn);
+ if (ret) {
+ dev_err(dev, "failure requesting tx%u irq %u, %d\n",
+ tx_chn->id, tx_chn->irq, ret);
+ goto err;
+ }
+ }
+
+err:
+ return ret;
+}
+
static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
{
u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
@@ -1608,6 +1641,12 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
dev_name(dev), tx_chn->id);
}
+ ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
+ if (ret) {
+ dev_err(dev, "Failed to add tx NAPI %d\n", ret);
+ goto err;
+ }
+
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
if (i) {
@@ -1632,6 +1671,29 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
+static void am65_cpsw_nuss_remove_rx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ struct am65_cpsw_rx_chn *rx_chn;
+ struct device *dev = common->dev;
+
+ rx_chn = &common->rx_chns;
+ devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+
+ if (!(rx_chn->irq < 0))
+ devm_free_irq(dev, rx_chn->irq, common);
+
+ netif_napi_del(&common->napi_rx);
+
+ if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+
+ common->rx_flow_id_base = -1;
+}
+
static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
@@ -1719,6 +1781,18 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
}
}
+ netif_napi_add(common->dma_ndev, &common->napi_rx,
+ am65_cpsw_nuss_rx_poll);
+
+ ret = devm_request_irq(dev, rx_chn->irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "failure requesting rx irq %u, %d\n",
+ rx_chn->irq, ret);
+ goto err;
+ }
+
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
if (i) {
@@ -1990,6 +2064,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->slave.phylink_config.dev = &port->ndev->dev;
port->slave.phylink_config.type = PHYLINK_NETDEV;
port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+ port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
@@ -2043,35 +2118,6 @@ static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
return ret;
}
- netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll);
-
- return ret;
-}
-
-static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
-{
- struct device *dev = common->dev;
- int i, ret = 0;
-
- for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
-
- netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll);
-
- ret = devm_request_irq(dev, tx_chn->irq,
- am65_cpsw_nuss_tx_irq,
- IRQF_TRIGGER_HIGH,
- tx_chn->tx_chn_name, tx_chn);
- if (ret) {
- dev_err(dev, "failure requesting tx%u irq %u, %d\n",
- tx_chn->id, tx_chn->irq, ret);
- goto err;
- }
- }
-
-err:
return ret;
}
@@ -2534,22 +2580,16 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
- struct devlink_port *dl_port;
struct am65_cpsw_port *port;
int ret = 0, i;
- ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
+ /* init tx channels */
+ ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret)
return ret;
-
- ret = devm_request_irq(dev, common->rx_chns.irq,
- am65_cpsw_nuss_rx_irq,
- IRQF_TRIGGER_HIGH, dev_name(dev), common);
- if (ret) {
- dev_err(dev, "failure requesting rx irq %u, %d\n",
- common->rx_chns.irq, ret);
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
return ret;
- }
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
@@ -2561,15 +2601,14 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (!port->ndev)
continue;
+ SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port);
+
ret = register_netdev(port->ndev);
if (ret) {
dev_err(dev, "error registering slave net device%i %d\n",
i, ret);
goto err_cleanup_ndev;
}
-
- dl_port = &port->devlink_port;
- devlink_port_type_eth_set(dl_port, port->ndev);
}
ret = am65_cpsw_register_notifiers(common);
@@ -2595,10 +2634,8 @@ int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
common->tx_ch_num = num_tx;
ret = am65_cpsw_nuss_init_tx_chns(common);
- if (ret)
- return ret;
- return am65_cpsw_nuss_ndev_add_tx_napi(common);
+ return ret;
}
struct am65_cpsw_soc_pdata {
@@ -2675,6 +2712,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct clk *clk;
u64 id_temp;
int ret, i;
+ int ale_entries;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
@@ -2747,14 +2785,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
am65_cpsw_nuss_get_ver(common);
- /* init tx channels */
- ret = am65_cpsw_nuss_init_tx_chns(common);
- if (ret)
- goto err_of_clear;
- ret = am65_cpsw_nuss_init_rx_chns(common);
- if (ret)
- goto err_of_clear;
-
ret = am65_cpsw_nuss_init_host_p(common);
if (ret)
goto err_of_clear;
@@ -2778,6 +2808,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
goto err_of_clear;
}
+ ale_entries = common->ale->params.ale_entries;
+ common->ale_context = devm_kzalloc(dev,
+ ale_entries * ALE_ENTRY_WORDS * sizeof(u32),
+ GFP_KERNEL);
ret = am65_cpsw_init_cpts(common);
if (ret)
goto err_of_clear;
@@ -2839,10 +2873,103 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int am65_cpsw_nuss_suspend(struct device *dev)
+{
+ struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ struct am65_cpsw_port *port;
+ struct net_device *ndev;
+ int i, ret;
+
+ cpsw_ale_dump(common->ale, common->ale_context);
+ host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ ndev = port->ndev;
+
+ if (!ndev)
+ continue;
+
+ port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ netif_device_detach(ndev);
+ if (netif_running(ndev)) {
+ rtnl_lock();
+ ret = am65_cpsw_nuss_ndo_slave_stop(ndev);
+ rtnl_unlock();
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ am65_cpts_suspend(common->cpts);
+
+ am65_cpsw_nuss_remove_rx_chns(common);
+ am65_cpsw_nuss_remove_tx_chns(common);
+
+ return 0;
+}
+
+static int am65_cpsw_nuss_resume(struct device *dev)
+{
+ struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_port *port;
+ struct net_device *ndev;
+ int i, ret;
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ return ret;
+
+ /* If RX IRQ was disabled before suspend, keep it disabled */
+ if (common->rx_irq_disabled)
+ disable_irq(common->rx_chns.irq);
+
+ am65_cpts_resume(common->cpts);
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ ndev = port->ndev;
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ rtnl_lock();
+ ret = am65_cpsw_nuss_ndo_slave_open(ndev);
+ rtnl_unlock();
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ }
+
+ netif_device_attach(ndev);
+ writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ }
+
+ writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ cpsw_ale_restore(common->ale, common->ale_context);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume)
+};
+
static struct platform_driver am65_cpsw_nuss_driver = {
.driver = {
.name = AM65_CPSW_DRV_NAME,
.of_match_table = am65_cpsw_nuss_of_mtable,
+ .pm = &am65_cpsw_nuss_dev_pm_ops,
},
.probe = am65_cpsw_nuss_probe,
.remove = am65_cpsw_nuss_remove,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 2c9850fdfcb6..4b75620f8d28 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -55,12 +55,16 @@ struct am65_cpsw_port {
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
+ /* Only for suspend resume context */
+ u32 vid_context;
};
struct am65_cpsw_host {
struct am65_cpsw_common *common;
void __iomem *port_base;
void __iomem *stat_base;
+ /* Only for suspend resume context */
+ u32 vid_context;
};
struct am65_cpsw_tx_chn {
@@ -145,6 +149,8 @@ struct am65_cpsw_common {
struct net_device *hw_bridge_dev;
struct notifier_block am65_cpsw_netdevice_nb;
unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
+ /* only for suspend/resume context restore */
+ u32 *ale_context;
};
struct am65_cpsw_ndev_stats {
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index e2f0fb286143..9535396b28cd 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -176,6 +176,16 @@ struct am65_cpts {
u32 genf_enable;
u32 hw_ts_enable;
struct sk_buff_head txq;
+ /* context save/restore */
+ u64 sr_cpts_ns;
+ u64 sr_ktime_ns;
+ u32 sr_control;
+ u32 sr_int_enable;
+ u32 sr_rftclk_sel;
+ u32 sr_ts_ppm_hi;
+ u32 sr_ts_ppm_low;
+ struct am65_genf_regs sr_genf[AM65_CPTS_GENF_MAX_NUM];
+ struct am65_genf_regs sr_estf[AM65_CPTS_ESTF_MAX_NUM];
};
struct am65_cpts_skb_cb_data {
@@ -381,9 +391,10 @@ static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
}
/* PTP clock operations */
-static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
int neg_adj = 0;
u64 adj_period;
u32 val;
@@ -615,7 +626,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp);
static struct ptp_clock_info am65_ptp_info = {
.owner = THIS_MODULE,
.name = "CTPS timer",
- .adjfreq = am65_cpts_ptp_adjfreq,
+ .adjfine = am65_cpts_ptp_adjfine,
.adjtime = am65_cpts_ptp_adjtime,
.gettimex64 = am65_cpts_ptp_gettimex,
.settime64 = am65_cpts_ptp_settime,
@@ -1029,6 +1040,72 @@ refclk_disable:
}
EXPORT_SYMBOL_GPL(am65_cpts_create);
+void am65_cpts_suspend(struct am65_cpts *cpts)
+{
+ /* save state and disable CPTS */
+ cpts->sr_control = am65_cpts_read32(cpts, control);
+ cpts->sr_int_enable = am65_cpts_read32(cpts, int_enable);
+ cpts->sr_rftclk_sel = am65_cpts_read32(cpts, rftclk_sel);
+ cpts->sr_ts_ppm_hi = am65_cpts_read32(cpts, ts_ppm_hi);
+ cpts->sr_ts_ppm_low = am65_cpts_read32(cpts, ts_ppm_low);
+ cpts->sr_cpts_ns = am65_cpts_gettime(cpts, NULL);
+ cpts->sr_ktime_ns = ktime_to_ns(ktime_get_real());
+ am65_cpts_disable(cpts);
+ clk_disable(cpts->refclk);
+
+ /* Save GENF state */
+ memcpy_fromio(&cpts->sr_genf, &cpts->reg->genf, sizeof(cpts->sr_genf));
+
+ /* Save ESTF state */
+ memcpy_fromio(&cpts->sr_estf, &cpts->reg->estf, sizeof(cpts->sr_estf));
+}
+EXPORT_SYMBOL_GPL(am65_cpts_suspend);
+
+void am65_cpts_resume(struct am65_cpts *cpts)
+{
+ int i;
+ s64 ktime_ns;
+
+ /* restore state and enable CPTS */
+ clk_enable(cpts->refclk);
+ am65_cpts_write32(cpts, cpts->sr_rftclk_sel, rftclk_sel);
+ am65_cpts_set_add_val(cpts);
+ am65_cpts_write32(cpts, cpts->sr_control, control);
+ am65_cpts_write32(cpts, cpts->sr_int_enable, int_enable);
+
+ /* Restore time to saved CPTS time + time in suspend/resume */
+ ktime_ns = ktime_to_ns(ktime_get_real());
+ ktime_ns -= cpts->sr_ktime_ns;
+ am65_cpts_settime(cpts, cpts->sr_cpts_ns + ktime_ns);
+
+ /* Restore compensation (PPM) */
+ am65_cpts_write32(cpts, cpts->sr_ts_ppm_hi, ts_ppm_hi);
+ am65_cpts_write32(cpts, cpts->sr_ts_ppm_low, ts_ppm_low);
+
+ /* Restore GENF state */
+ for (i = 0; i < AM65_CPTS_GENF_MAX_NUM; i++) {
+ am65_cpts_write32(cpts, 0, genf[i].length); /* TRM sequence */
+ am65_cpts_write32(cpts, cpts->sr_genf[i].comp_hi, genf[i].comp_hi);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].comp_lo, genf[i].comp_lo);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].length, genf[i].length);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].control, genf[i].control);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_hi, genf[i].ppm_hi);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_low, genf[i].ppm_low);
+ }
+
+ /* Restore ESTF state */
+ for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
+ am65_cpts_write32(cpts, 0, estf[i].length); /* TRM sequence */
+ am65_cpts_write32(cpts, cpts->sr_estf[i].comp_hi, estf[i].comp_hi);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].comp_lo, estf[i].comp_lo);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].length, estf[i].length);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].control, estf[i].control);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_hi, estf[i].ppm_hi);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_low, estf[i].ppm_low);
+ }
+}
+EXPORT_SYMBOL_GPL(am65_cpts_resume);
+
static int am65_cpts_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
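The resume path above re-seeds the CPTS counter with the value captured at suspend plus the wall-clock time that passed in between. A minimal sketch of that arithmetic, using the sr_* fields added to struct am65_cpts (illustrative only, not part of the patch):

/* Mirrors the restore math in am65_cpts_resume(); illustrative only. */
static u64 example_restored_cpts_ns(struct am65_cpts *cpts)
{
	s64 slept_ns = ktime_to_ns(ktime_get_real()) - cpts->sr_ktime_ns;

	return cpts->sr_cpts_ns + slept_ns;
}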
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index cf9fbc28fd03..bd08f4b2edd2 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -28,6 +28,8 @@ u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+void am65_cpts_suspend(struct am65_cpts *cpts);
+void am65_cpts_resume(struct am65_cpts *cpts);
#else
static inline struct am65_cpts *am65_cpts_create(struct device *dev,
void __iomem *regs,
@@ -69,6 +71,14 @@ static inline int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
}
+
+static inline void am65_cpts_suspend(struct am65_cpts *cpts)
+{
+}
+
+static inline void am65_cpts_resume(struct am65_cpts *cpts)
+{
+}
#endif
#endif /* K3_CPTS_H_ */
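Because the stub variants above compile to empty inlines when CPTS support is built out, callers can invoke the new helpers unconditionally; the am65-cpsw-nuss suspend/resume paths earlier in this patch do exactly that. A sketch of the pairing (illustrative, with the DMA-channel teardown/rebuild omitted):

static int example_mac_suspend(struct device *dev)
{
	struct am65_cpsw_common *common = dev_get_drvdata(dev);

	am65_cpts_suspend(common->cpts);	/* safe even without CPTS support */
	return 0;
}

static int example_mac_resume(struct device *dev)
{
	struct am65_cpsw_common *common = dev_get_drvdata(dev);

	am65_cpts_resume(common->cpts);
	return 0;
}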
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 231370e9a801..0c5e783e574c 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1452,6 +1452,16 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
}
}
+void cpsw_ale_restore(struct cpsw_ale *ale, u32 *data)
+{
+ int i;
+
+ for (i = 0; i < ale->params.ale_entries; i++) {
+ cpsw_ale_write(ale, i, data);
+ data += ALE_ENTRY_WORDS;
+ }
+}
+
u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
{
return ale ? ale->params.ale_entries : 0;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index aba4572cfa3b..6779ee111d57 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -127,6 +127,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+void cpsw_ale_restore(struct cpsw_ale *ale, u32 *data);
u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale);
static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
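cpsw_ale_restore() is the write-back counterpart of the existing cpsw_ale_dump(); both walk the table at ALE_ENTRY_WORDS u32 words per entry, which is why the nuss probe path above sizes its ale_context buffer as ale_entries * ALE_ENTRY_WORDS * sizeof(u32). A hedged sketch of the round trip (not taken from the patch):

static int example_ale_save_restore(struct cpsw_ale *ale)
{
	int entries = cpsw_ale_get_num_entries(ale);
	u32 *ctx;

	ctx = kcalloc(entries * ALE_ENTRY_WORDS, sizeof(u32), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	cpsw_ale_dump(ale, ctx);	/* save every raw entry */
	/* ... switch loses context, e.g. across suspend ... */
	cpsw_ale_restore(ale, ctx);	/* write the entries back */

	kfree(ctx);
	return 0;
}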
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 92ca739fac01..bcccf43d368b 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -213,25 +213,13 @@ static void cpts_update_cur_time(struct cpts *cpts, int match,
/* PTP clock operations */
-static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct cpts *cpts = container_of(ptp, struct cpts, info);
- int neg_adj = 0;
- u32 diff, mult;
- u64 adj;
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- mult = cpts->cc_mult;
- adj = mult;
- adj *= ppb;
- diff = div_u64(adj, 1000000000ULL);
mutex_lock(&cpts->ptp_clk_mutex);
- cpts->mult_new = neg_adj ? mult - diff : mult + diff;
+ cpts->mult_new = adjust_by_scaled_ppm(cpts->cc_mult, scaled_ppm);
cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);
@@ -435,7 +423,7 @@ static const struct ptp_clock_info cpts_info = {
.n_ext_ts = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = cpts_ptp_adjfreq,
+ .adjfine = cpts_ptp_adjfine,
.adjtime = cpts_ptp_adjtime,
.gettimex64 = cpts_ptp_gettimeex,
.settime64 = cpts_ptp_settime,
@@ -794,7 +782,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts_calc_mult_shift(cpts);
/* save cc.mult original value as it can be modified
- * by cpts_ptp_adjfreq().
+ * by cpts_ptp_adjfine().
*/
cpts->cc_mult = cpts->cc.mult;
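Both adjfreq-to-adjfine conversions above take a scaled_ppm argument, which carries 16 fractional bits (65536 corresponds to 1 ppm): the am65 callback converts it back to ppb with scaled_ppm_to_ppb() and keeps its old math, while the cpts callback lets adjust_by_scaled_ppm() rescale cc_mult directly. Ignoring the rounding details of the real helper, the adjustment is roughly the following (a sketch, not the kernel implementation):

/* Rough equivalent of adjust_by_scaled_ppm(); the real helper handles rounding. */
static u64 example_adjust_mult(u64 base, long scaled_ppm)
{
	/* scaled_ppm / 2^16 gives ppm; a further / 10^6 scales it against base */
	s64 delta = div64_s64((s64)base * scaled_ppm, 1000000LL << 16);

	return base + delta;
}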
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index aba70bef4894..1bb596a9d8a2 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1261,7 +1261,7 @@ out:
}
/* Submit the packet */
-static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_stats *tx_stats = &netcp->stats;
@@ -1916,16 +1916,16 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp_rx);
+ start = u64_stats_fetch_begin(&p->syncp_rx);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
+ } while (u64_stats_fetch_retry(&p->syncp_rx, start));
do {
- start = u64_stats_fetch_begin_irq(&p->syncp_tx);
+ start = u64_stats_fetch_begin(&p->syncp_tx);
txpackets = p->tx_packets;
txbytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
+ } while (u64_stats_fetch_retry(&p->syncp_tx, start));
stats->rx_packets = rxpackets;
stats->rx_bytes = rxbytes;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 0fb15a17b547..d716e6fe26e1 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2217,16 +2217,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
netdev_stats_to_stats64(stats, &dev->stats);
do {
- start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
+ start = u64_stats_fetch_begin(&rp->rx_stats.syncp);
stats->rx_packets = rp->rx_stats.packets;
stats->rx_bytes = rp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
+ start = u64_stats_fetch_begin(&rp->tx_stats.syncp);
stats->tx_packets = rp->tx_stats.packets;
stats->tx_bytes = rp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start));
}
static void rhine_set_rx_mode(struct net_device *dev)
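The netcp and via-rhine hunks are mechanical renames: the _irq variants of the u64_stats fetch helpers are gone and the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() now cover those callers. The reader loop itself is unchanged; a generic sketch with a hypothetical stats struct:

struct example_stats {
	u64 rx_packets;
	struct u64_stats_sync syncp;
};

static u64 example_read_rx_packets(struct example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = s->rx_packets;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}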
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index f5d43d8c9629..86310588c6c1 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -16,9 +16,15 @@ config NET_VENDOR_WANGXUN
if NET_VENDOR_WANGXUN
+config LIBWX
+ tristate
+ help
+ Common library for Wangxun(R) Ethernet drivers.
+
config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
+ select LIBWX
help
This driver supports Wangxun(R) GbE PCI Express family of
adapters.
@@ -32,6 +38,7 @@ config NGBE
config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
+ select LIBWX
help
This driver supports Wangxun(R) 10GbE PCI Express family of
adapters.
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
index ac3fb06b233c..ca19311dbe38 100644
--- a/drivers/net/ethernet/wangxun/Makefile
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -3,5 +3,6 @@
# Makefile for the Wangxun network device drivers.
#
+obj-$(CONFIG_LIBWX) += libwx/
obj-$(CONFIG_TXGBE) += txgbe/
obj-$(CONFIG_NGBE) += ngbe/
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
new file mode 100644
index 000000000000..1ed5e23af944
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+
+obj-$(CONFIG_LIBWX) += libwx.o
+
+libwx-objs := wx_hw.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
new file mode 100644
index 000000000000..c57dc3238b3f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -0,0 +1,936 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+
+static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask)
+{
+ u32 mask;
+
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ wr32(wxhw, WX_PX_IMS(0), mask);
+
+ if (wxhw->mac.type == wx_mac_sp) {
+ mask = (qmask >> 32);
+ if (mask)
+ wr32(wxhw, WX_PX_IMS(1), mask);
+ }
+}
+
+/* cmd_addr is used for some special command:
+ * 1. to be sector address, when implemented erase sector command
+ * 2. to be flash address when implemented read, write flash address
+ */
+static int wx_fmgr_cmd_op(struct wx_hw *wxhw, u32 cmd, u32 cmd_addr)
+{
+ u32 cmd_val = 0, val = 0;
+
+ cmd_val = WX_SPI_CMD_CMD(cmd) |
+ WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
+ cmd_addr;
+ wr32(wxhw, WX_SPI_CMD, cmd_val);
+
+ return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
+ false, wxhw, WX_SPI_STATUS);
+}
+
+static int wx_flash_read_dword(struct wx_hw *wxhw, u32 addr, u32 *data)
+{
+ int ret = 0;
+
+ ret = wx_fmgr_cmd_op(wxhw, WX_SPI_CMD_READ_DWORD, addr);
+ if (ret < 0)
+ return ret;
+
+ *data = rd32(wxhw, WX_SPI_DATA);
+
+ return ret;
+}
+
+int wx_check_flash_load(struct wx_hw *hw, u32 check_bit)
+{
+ u32 reg = 0;
+ int err = 0;
+
+ /* if flash is present */
+ if (!(rd32(hw, WX_SPI_STATUS) &
+ WX_SPI_STATUS_FLASH_BYPASS)) {
+ /* wait for hw to finish loading flash */
+ err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000,
+ false, hw, WX_SPI_ILDR_STATUS);
+ if (err < 0)
+ wx_err(hw, "Check flash load timeout.\n");
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(wx_check_flash_load);
+
+void wx_control_hw(struct wx_hw *wxhw, bool drv)
+{
+ if (drv) {
+ /* Let firmware know the driver has taken over */
+ wr32m(wxhw, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_DRV_LOAD, WX_CFG_PORT_CTL_DRV_LOAD);
+ } else {
+ /* Let firmware take over control of hw */
+ wr32m(wxhw, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_DRV_LOAD, 0);
+ }
+}
+EXPORT_SYMBOL(wx_control_hw);
+
+/**
+ * wx_mng_present - returns 0 when management capability is present
+ * @wxhw: pointer to hardware structure
+ */
+int wx_mng_present(struct wx_hw *wxhw)
+{
+ u32 fwsm;
+
+ fwsm = rd32(wxhw, WX_MIS_ST);
+ if (fwsm & WX_MIS_ST_MNG_INIT_DN)
+ return 0;
+ else
+ return -EACCES;
+}
+EXPORT_SYMBOL(wx_mng_present);
+
+/* Software lock to be held while software semaphore is being accessed. */
+static DEFINE_MUTEX(wx_sw_sync_lock);
+
+/**
+ * wx_release_sw_sync - Release SW semaphore
+ * @wxhw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SW semaphore for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+static void wx_release_sw_sync(struct wx_hw *wxhw, u32 mask)
+{
+ mutex_lock(&wx_sw_sync_lock);
+ wr32m(wxhw, WX_MNG_SWFW_SYNC, mask, 0);
+ mutex_unlock(&wx_sw_sync_lock);
+}
+
+/**
+ * wx_acquire_sw_sync - Acquire SW semaphore
+ * @wxhw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SW semaphore for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask)
+{
+ u32 sem = 0;
+ int ret = 0;
+
+ mutex_lock(&wx_sw_sync_lock);
+ ret = read_poll_timeout(rd32, sem, !(sem & mask),
+ 5000, 2000000, false, wxhw, WX_MNG_SWFW_SYNC);
+ if (!ret) {
+ sem |= mask;
+ wr32(wxhw, WX_MNG_SWFW_SYNC, sem);
+ } else {
+ wx_err(wxhw, "SW Semaphore not granted: 0x%x.\n", sem);
+ }
+ mutex_unlock(&wx_sw_sync_lock);
+
+ return ret;
+}
+
+/**
+ * wx_host_interface_command - Issue command to manageability block
+ * @wxhw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ **/
+int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ u32 hdr_size = sizeof(struct wx_hic_hdr);
+ u32 hicr, i, bi, buf[64] = {};
+ int status = 0;
+ u32 dword_len;
+ u16 buf_len;
+
+ if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
+ wx_err(wxhw, "Buffer length failure buffersize=%d.\n", length);
+ return -EINVAL;
+ }
+
+ status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB);
+ if (status != 0)
+ return status;
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ wx_err(wxhw, "Buffer length failure, not aligned to dword");
+ status = -EINVAL;
+ goto rel_out;
+ }
+
+ dword_len = length >> 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++) {
+ wr32a(wxhw, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ /* write flush */
+ buf[i] = rd32a(wxhw, WX_MNG_MBOX, i);
+ }
+ /* Setting this bit tells the ARC that a new command is pending. */
+ wr32m(wxhw, WX_MNG_MBOX_CTL,
+ WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);
+
+ status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
+ timeout * 1000, false, wxhw, WX_MNG_MBOX_CTL);
+
+ /* Check command completion */
+ if (status) {
+ wx_dbg(wxhw, "Command has failed with no status valid.\n");
+
+ buf[0] = rd32(wxhw, WX_MNG_MBOX);
+ if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
+ status = -EINVAL;
+ goto rel_out;
+ }
+ if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+ wx_dbg(wxhw, "It's unknown cmd.\n");
+ status = -EINVAL;
+ goto rel_out;
+ }
+
+ wx_dbg(wxhw, "write value:\n");
+ for (i = 0; i < dword_len; i++)
+ wx_dbg(wxhw, "%x ", buffer[i]);
+ wx_dbg(wxhw, "read value:\n");
+ for (i = 0; i < dword_len; i++)
+ wx_dbg(wxhw, "%x ", buf[i]);
+ }
+
+ if (!return_data)
+ goto rel_out;
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi);
+ le32_to_cpus(&buffer[bi]);
+ }
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = ((struct wx_hic_hdr *)buffer)->buf_len;
+ if (buf_len == 0)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ wx_err(wxhw, "Buffer not large enough for reply message.\n");
+ status = -EFAULT;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (bi is where we left off) */
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi);
+ le32_to_cpus(&buffer[bi]);
+ }
+
+rel_out:
+ wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB);
+ return status;
+}
+EXPORT_SYMBOL(wx_host_interface_command);
+
+/**
+ * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
+ * assuming that the semaphore is already obtained.
+ * @wxhw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data)
+{
+ struct wx_hic_read_shadow_ram buffer;
+ int status;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = (__force u32)cpu_to_be32(offset * 2);
+ /* one word */
+ buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
+
+ status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, false);
+
+ if (status != 0)
+ return status;
+
+ *data = (u16)rd32a(wxhw, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+
+ return status;
+}
+
+/**
+ * wx_read_ee_hostif - Read EEPROM word using a host interface cmd
+ * @wxhw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data)
+{
+ int status = 0;
+
+ status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ if (status == 0) {
+ status = wx_read_ee_hostif_data(wxhw, offset, data);
+ wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(wx_read_ee_hostif);
+
+/**
+ * wx_read_ee_hostif_buffer - Read EEPROM word(s) using hostif
+ * @wxhw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the hostif.
+ **/
+int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
+ u16 offset, u16 words, u16 *data)
+{
+ struct wx_hic_read_shadow_ram buffer;
+ u32 current_word = 0;
+ u16 words_to_read;
+ u32 value = 0;
+ int status;
+ u32 i;
+
+ /* Take semaphore for the entire operation. */
+ status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ if (status != 0)
+ return status;
+
+ while (words) {
+ if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+ else
+ words_to_read = words;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
+ buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
+
+ status = wx_host_interface_command(wxhw, (u32 *)&buffer,
+ sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT,
+ false);
+
+ if (status != 0) {
+ wx_err(wxhw, "Host interface command failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < words_to_read; i++) {
+ u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
+
+ value = rd32(wxhw, reg);
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ i++;
+ if (i < words_to_read) {
+ value >>= 16;
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ }
+ }
+ words -= words_to_read;
+ }
+
+out:
+ wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ return status;
+}
+EXPORT_SYMBOL(wx_read_ee_hostif_buffer);
+
+/**
+ * wx_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ * Calculates the checksum over a buffer of the specified length. The
+ * checksum calculated is returned.
+ **/
+static u8 wx_calculate_checksum(u8 *buffer, u32 length)
+{
+ u8 sum = 0;
+ u32 i;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8)(0 - sum);
+}
+
+/**
+ * wx_reset_hostif - send reset cmd to fw
+ * @wxhw: pointer to hardware structure
+ *
+ * Sends reset cmd to firmware through the manageability
+ * block.
+ **/
+int wx_reset_hostif(struct wx_hw *wxhw)
+{
+ struct wx_hic_reset reset_cmd;
+ int ret_val = 0;
+ int i;
+
+ reset_cmd.hdr.cmd = FW_RESET_CMD;
+ reset_cmd.hdr.buf_len = FW_RESET_LEN;
+ reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ reset_cmd.lan_id = wxhw->bus.func;
+ reset_cmd.reset_type = (u16)wxhw->reset_type;
+ reset_cmd.hdr.checksum = 0;
+ reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd,
+ (FW_CEM_HDR_LEN +
+ reset_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = wx_host_interface_command(wxhw, (u32 *)&reset_cmd,
+ sizeof(reset_cmd),
+ WX_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val != 0)
+ continue;
+
+ if (reset_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = 0;
+ else
+ ret_val = -EFAULT;
+
+ break;
+ }
+
+ return ret_val;
+}
+EXPORT_SYMBOL(wx_reset_hostif);
+
+/**
+ * wx_init_eeprom_params - Initialize EEPROM params
+ * @wxhw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters wx_eeprom_info within the
+ * wx_hw struct in order to set up EEPROM access.
+ **/
+void wx_init_eeprom_params(struct wx_hw *wxhw)
+{
+ struct wx_eeprom_info *eeprom = &wxhw->eeprom;
+ u16 eeprom_size;
+ u16 data = 0x80;
+
+ if (eeprom->type == wx_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = wx_eeprom_none;
+
+ if (!(rd32(wxhw, WX_SPI_STATUS) &
+ WX_SPI_STATUS_FLASH_BYPASS)) {
+ eeprom->type = wx_flash;
+
+ eeprom_size = 4096;
+ eeprom->word_size = eeprom_size >> 1;
+
+ wx_dbg(wxhw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+ }
+
+ if (wxhw->mac.type == wx_mac_sp) {
+ if (wx_read_ee_hostif(wxhw, WX_SW_REGION_PTR, &data)) {
+ wx_err(wxhw, "NVM Read Error\n");
+ return;
+ }
+ data = data >> 1;
+ }
+
+ eeprom->sw_region_offset = data;
+}
+EXPORT_SYMBOL(wx_init_eeprom_params);
+
+/**
+ * wx_get_mac_addr - Generic get MAC address
+ * @wxhw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from first Receive Address Register (RAR0)
+ * A reset of the adapter must be performed prior to calling this function
+ * in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ wr32(wxhw, WX_PSR_MAC_SWC_IDX, 0);
+ rar_high = rd32(wxhw, WX_PSR_MAC_SWC_AD_H);
+ rar_low = rd32(wxhw, WX_PSR_MAC_SWC_AD_L);
+
+ for (i = 0; i < 2; i++)
+ mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);
+
+ for (i = 0; i < 4; i++)
+ mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
+}
+EXPORT_SYMBOL(wx_get_mac_addr);
+
+/**
+ * wx_set_rar - Set Rx address register
+ * @wxhw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @pools: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools,
+ u32 enable_addr)
+{
+ u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_low, rar_high;
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ wx_err(wxhw, "RAR index %d is out of range.\n", index);
+ return -EINVAL;
+ }
+
+ /* select the MAC address */
+ wr32(wxhw, WX_PSR_MAC_SWC_IDX, index);
+
+ /* setup VMDq pool mapping */
+ wr32(wxhw, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
+ if (wxhw->mac.type == wx_mac_sp)
+ wr32(wxhw, WX_PSR_MAC_SWC_VM_H, pools >> 32);
+
+ /* HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ *
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_low = ((u32)addr[5] |
+ ((u32)addr[4] << 8) |
+ ((u32)addr[3] << 16) |
+ ((u32)addr[2] << 24));
+ rar_high = ((u32)addr[1] |
+ ((u32)addr[0] << 8));
+ if (enable_addr != 0)
+ rar_high |= WX_PSR_MAC_SWC_AD_H_AV;
+
+ wr32(wxhw, WX_PSR_MAC_SWC_AD_L, rar_low);
+ wr32m(wxhw, WX_PSR_MAC_SWC_AD_H,
+ (WX_PSR_MAC_SWC_AD_H_AD(~0) |
+ WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+ WX_PSR_MAC_SWC_AD_H_AV),
+ rar_high);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_rar);
+
+/**
+ * wx_clear_rar - Remove Rx address register
+ * @wxhw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+int wx_clear_rar(struct wx_hw *wxhw, u32 index)
+{
+ u32 rar_entries = wxhw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ wx_err(wxhw, "RAR index %d is out of range.\n", index);
+ return -EINVAL;
+ }
+
+ /* Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ wr32(wxhw, WX_PSR_MAC_SWC_IDX, index);
+
+ wr32(wxhw, WX_PSR_MAC_SWC_VM_L, 0);
+ wr32(wxhw, WX_PSR_MAC_SWC_VM_H, 0);
+
+ wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0);
+ wr32m(wxhw, WX_PSR_MAC_SWC_AD_H,
+ (WX_PSR_MAC_SWC_AD_H_AD(~0) |
+ WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+ WX_PSR_MAC_SWC_AD_H_AV),
+ 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_clear_rar);
+
+/**
+ * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
+ * @wxhw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+static int wx_clear_vmdq(struct wx_hw *wxhw, u32 rar, u32 __maybe_unused vmdq)
+{
+ u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 mpsar_lo, mpsar_hi;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ wx_err(wxhw, "RAR index %d is out of range.\n", rar);
+ return -EINVAL;
+ }
+
+ wr32(wxhw, WX_PSR_MAC_SWC_IDX, rar);
+ mpsar_lo = rd32(wxhw, WX_PSR_MAC_SWC_VM_L);
+ mpsar_hi = rd32(wxhw, WX_PSR_MAC_SWC_VM_H);
+
+ if (!mpsar_lo && !mpsar_hi)
+ return 0;
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ wx_clear_rar(wxhw, rar);
+
+ return 0;
+}
+
+/**
+ * wx_init_uta_tables - Initialize the Unicast Table Array
+ * @wxhw: pointer to hardware structure
+ **/
+static void wx_init_uta_tables(struct wx_hw *wxhw)
+{
+ int i;
+
+ wx_dbg(wxhw, " Clearing UTA\n");
+
+ for (i = 0; i < 128; i++)
+ wr32(wxhw, WX_PSR_UC_TBL(i), 0);
+}
+
+/**
+ * wx_init_rx_addrs - Initializes receive address filters.
+ * @wxhw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+void wx_init_rx_addrs(struct wx_hw *wxhw)
+{
+ u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 psrctl;
+ int i;
+
+ /* If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (!is_valid_ether_addr(wxhw->mac.addr)) {
+ /* Get the MAC address from the RAR0 for later reference */
+ wx_get_mac_addr(wxhw, wxhw->mac.addr);
+ wx_dbg(wxhw, "Keeping Current RAR0 Addr = %pM\n", wxhw->mac.addr);
+ } else {
+ /* Setup the receive address. */
+ wx_dbg(wxhw, "Overriding MAC Address in RAR[0]\n");
+ wx_dbg(wxhw, "New MAC Addr = %pM\n", wxhw->mac.addr);
+
+ wx_set_rar(wxhw, 0, wxhw->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
+
+ if (wxhw->mac.type == wx_mac_sp) {
+ /* clear VMDq pool/queue selection for RAR 0 */
+ wx_clear_vmdq(wxhw, 0, WX_CLEAR_VMDQ_ALL);
+ }
+ }
+
+ /* Zero out the other receive addresses. */
+ wx_dbg(wxhw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+ for (i = 1; i < rar_entries; i++) {
+ wr32(wxhw, WX_PSR_MAC_SWC_IDX, i);
+ wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0);
+ wr32(wxhw, WX_PSR_MAC_SWC_AD_H, 0);
+ }
+
+ /* Clear the MTA */
+ wxhw->addr_ctrl.mta_in_use = 0;
+ psrctl = rd32(wxhw, WX_PSR_CTL);
+ psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
+ psrctl |= wxhw->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
+ wr32(wxhw, WX_PSR_CTL, psrctl);
+ wx_dbg(wxhw, " Clearing MTA\n");
+ for (i = 0; i < wxhw->mac.mcft_size; i++)
+ wr32(wxhw, WX_PSR_MC_TBL(i), 0);
+
+ wx_init_uta_tables(wxhw);
+}
+EXPORT_SYMBOL(wx_init_rx_addrs);
+
+void wx_disable_rx(struct wx_hw *wxhw)
+{
+ u32 pfdtxgswc;
+ u32 rxctrl;
+
+ rxctrl = rd32(wxhw, WX_RDB_PB_CTL);
+ if (rxctrl & WX_RDB_PB_CTL_RXEN) {
+ pfdtxgswc = rd32(wxhw, WX_PSR_CTL);
+ if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
+ pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
+ wr32(wxhw, WX_PSR_CTL, pfdtxgswc);
+ wxhw->mac.set_lben = true;
+ } else {
+ wxhw->mac.set_lben = false;
+ }
+ rxctrl &= ~WX_RDB_PB_CTL_RXEN;
+ wr32(wxhw, WX_RDB_PB_CTL, rxctrl);
+
+ if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
+ /* disable mac receiver */
+ wr32m(wxhw, WX_MAC_RX_CFG,
+ WX_MAC_RX_CFG_RE, 0);
+ }
+ }
+}
+EXPORT_SYMBOL(wx_disable_rx);
+
+/**
+ * wx_disable_pcie_master - Disable PCI-express master access
+ * @wxhw: pointer to hardware structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+int wx_disable_pcie_master(struct wx_hw *wxhw)
+{
+ int status = 0;
+ u32 val;
+
+ /* Always set this bit to ensure any future transactions are blocked */
+ pci_clear_master(wxhw->pdev);
+
+ /* Exit if master requests are blocked */
+ if (!(rd32(wxhw, WX_PX_TRANSACTION_PENDING)))
+ return 0;
+
+ /* Poll for master request bit to clear */
+ status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
+ false, wxhw, WX_PX_TRANSACTION_PENDING);
+ if (status < 0)
+ wx_err(wxhw, "PCIe transaction pending bit did not clear.\n");
+
+ return status;
+}
+EXPORT_SYMBOL(wx_disable_pcie_master);
+
+/**
+ * wx_stop_adapter - Generic stop Tx/Rx units
+ * @wxhw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within wx_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+int wx_stop_adapter(struct wx_hw *wxhw)
+{
+ u16 i;
+
+ /* Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ wxhw->adapter_stopped = true;
+
+ /* Disable the receive unit */
+ wx_disable_rx(wxhw);
+
+ /* Set interrupt mask to stop interrupts from being generated */
+ wx_intr_disable(wxhw, WX_INTR_ALL);
+
+ /* Clear any pending interrupts, flush previous writes */
+ wr32(wxhw, WX_PX_MISC_IC, 0xffffffff);
+ wr32(wxhw, WX_BME_CTL, 0x3);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < wxhw->mac.max_tx_queues; i++) {
+ wr32m(wxhw, WX_PX_TR_CFG(i),
+ WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
+ WX_PX_TR_CFG_SWFLSH);
+ }
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < wxhw->mac.max_rx_queues; i++) {
+ wr32m(wxhw, WX_PX_RR_CFG(i),
+ WX_PX_RR_CFG_RR_EN, 0);
+ }
+
+ /* flush all queues disables */
+ WX_WRITE_FLUSH(wxhw);
+
+ /* Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verify no pending requests
+ */
+ return wx_disable_pcie_master(wxhw);
+}
+EXPORT_SYMBOL(wx_stop_adapter);
+
+void wx_reset_misc(struct wx_hw *wxhw)
+{
+ int i;
+
+ /* receive packets with size > 2048 */
+ wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
+
+ /* clear counters on read */
+ wr32m(wxhw, WX_MMC_CONTROL,
+ WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);
+
+ wr32m(wxhw, WX_MAC_RX_FLOW_CTRL,
+ WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
+
+ wr32(wxhw, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+
+ wr32m(wxhw, WX_MIS_RST_ST,
+ WX_MIS_RST_ST_RST_INIT, 0x1E00);
+
+ /* errata 4: initialize mng flex tbl and wakeup flex tbl */
+ wr32(wxhw, WX_PSR_MNG_FLEX_SEL, 0);
+ for (i = 0; i < 16; i++) {
+ wr32(wxhw, WX_PSR_MNG_FLEX_DW_L(i), 0);
+ wr32(wxhw, WX_PSR_MNG_FLEX_DW_H(i), 0);
+ wr32(wxhw, WX_PSR_MNG_FLEX_MSK(i), 0);
+ }
+ wr32(wxhw, WX_PSR_LAN_FLEX_SEL, 0);
+ for (i = 0; i < 16; i++) {
+ wr32(wxhw, WX_PSR_LAN_FLEX_DW_L(i), 0);
+ wr32(wxhw, WX_PSR_LAN_FLEX_DW_H(i), 0);
+ wr32(wxhw, WX_PSR_LAN_FLEX_MSK(i), 0);
+ }
+
+ /* set pause frame dst mac addr */
+ wr32(wxhw, WX_RDB_PFCMACDAL, 0xC2000001);
+ wr32(wxhw, WX_RDB_PFCMACDAH, 0x0180);
+}
+EXPORT_SYMBOL(wx_reset_misc);
+
+/**
+ * wx_get_pcie_msix_counts - Gets MSI-X vector count
+ * @wxhw: pointer to hardware structure
+ * @msix_count: number of MSI interrupts that can be obtained
+ * @max_msix_count: number of MSI interrupts that the MAC needs
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count)
+{
+ struct pci_dev *pdev = wxhw->pdev;
+ struct device *dev = &pdev->dev;
+ int pos;
+
+ *msix_count = 1;
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (!pos) {
+ dev_err(dev, "Unable to find MSI-X Capabilities\n");
+ return -EINVAL;
+ }
+ pci_read_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ msix_count);
+ *msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK;
+ /* MSI-X count is zero-based in HW */
+ *msix_count += 1;
+
+ if (*msix_count > max_msix_count)
+ *msix_count = max_msix_count;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_pcie_msix_counts);
+
+int wx_sw_init(struct wx_hw *wxhw)
+{
+ struct pci_dev *pdev = wxhw->pdev;
+ u32 ssid = 0;
+ int err = 0;
+
+ wxhw->vendor_id = pdev->vendor;
+ wxhw->device_id = pdev->device;
+ wxhw->revision_id = pdev->revision;
+ wxhw->oem_svid = pdev->subsystem_vendor;
+ wxhw->oem_ssid = pdev->subsystem_device;
+ wxhw->bus.device = PCI_SLOT(pdev->devfn);
+ wxhw->bus.func = PCI_FUNC(pdev->devfn);
+
+ if (wxhw->oem_svid == PCI_VENDOR_ID_WANGXUN) {
+ wxhw->subsystem_vendor_id = pdev->subsystem_vendor;
+ wxhw->subsystem_device_id = pdev->subsystem_device;
+ } else {
+ err = wx_flash_read_dword(wxhw, 0xfffdc, &ssid);
+ if (!err)
+ wxhw->subsystem_device_id = swab16((u16)ssid);
+
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_sw_init);
+
+MODULE_LICENSE("GPL");
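The helpers exported above are intended to be shared by the ngbe and txgbe drivers, which now select LIBWX. A hedged sketch of the sort of bring-up sequence a consumer might run at probe time; the exact order is driver-specific, and EXAMPLE_FLASH_LOAD_BIT is hypothetical (each MAC driver passes its own WX_SPI_ILDR_STATUS bit):

#define EXAMPLE_FLASH_LOAD_BIT	BIT(0)	/* hypothetical check bit */

static int example_libwx_bringup(struct wx_hw *wxhw)
{
	int err;

	err = wx_sw_init(wxhw);			/* cache PCI IDs, bus/function, SSID */
	if (err)
		return err;

	err = wx_check_flash_load(wxhw, EXAMPLE_FLASH_LOAD_BIT);
	if (err)
		return err;

	wx_init_eeprom_params(wxhw);
	wx_get_mac_addr(wxhw, wxhw->mac.perm_addr);
	wx_control_hw(wxhw, true);		/* tell firmware the driver has taken over */

	return 0;
}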
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
new file mode 100644
index 000000000000..a0652f5e9939
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_HW_H_
+#define _WX_HW_H_
+
+int wx_check_flash_load(struct wx_hw *hw, u32 check_bit);
+void wx_control_hw(struct wx_hw *wxhw, bool drv);
+int wx_mng_present(struct wx_hw *wxhw);
+int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
+ u32 length, u32 timeout, bool return_data);
+int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data);
+int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
+ u16 offset, u16 words, u16 *data);
+int wx_reset_hostif(struct wx_hw *wxhw);
+void wx_init_eeprom_params(struct wx_hw *wxhw);
+void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr);
+int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, u32 enable_addr);
+int wx_clear_rar(struct wx_hw *wxhw, u32 index);
+void wx_init_rx_addrs(struct wx_hw *wxhw);
+void wx_disable_rx(struct wx_hw *wxhw);
+int wx_disable_pcie_master(struct wx_hw *wxhw);
+int wx_stop_adapter(struct wx_hw *wxhw);
+void wx_reset_misc(struct wx_hw *wxhw);
+int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count);
+int wx_sw_init(struct wx_hw *wxhw);
+
+#endif /* _WX_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
new file mode 100644
index 000000000000..1cbeef8230bf
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_TYPE_H_
+#define _WX_TYPE_H_
+
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+#define WX_NCSI_SUP 0x8000
+#define WX_NCSI_MASK 0x8000
+#define WX_WOL_SUP 0x4000
+#define WX_WOL_MASK 0x4000
+
+/* MSI-X capability fields masks */
+#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+#define WX_PCI_LINK_STATUS 0xB2
+
+/**************** Global Registers ****************************/
+/* chip control Registers */
+#define WX_MIS_PWR 0x10000
+#define WX_MIS_RST 0x1000C
+#define WX_MIS_RST_LAN_RST(_i) BIT((_i) + 1)
+#define WX_MIS_RST_SW_RST BIT(0)
+#define WX_MIS_ST 0x10028
+#define WX_MIS_ST_MNG_INIT_DN BIT(0)
+#define WX_MIS_SWSM 0x1002C
+#define WX_MIS_SWSM_SMBI BIT(0)
+#define WX_MIS_RST_ST 0x10030
+#define WX_MIS_RST_ST_RST_INI_SHIFT 8
+#define WX_MIS_RST_ST_RST_INIT (0xFF << WX_MIS_RST_ST_RST_INI_SHIFT)
+
+/* FMGR Registers */
+#define WX_SPI_CMD 0x10104
+#define WX_SPI_CMD_READ_DWORD 0x1
+#define WX_SPI_CLK_DIV 0x3
+#define WX_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28)
+#define WX_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25)
+#define WX_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF))
+#define WX_SPI_DATA 0x10108
+#define WX_SPI_DATA_BYPASS BIT(31)
+#define WX_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16)
+#define WX_SPI_DATA_OP_DONE BIT(0)
+#define WX_SPI_STATUS 0x1010C
+#define WX_SPI_STATUS_OPDONE BIT(0)
+#define WX_SPI_STATUS_FLASH_BYPASS BIT(31)
+#define WX_SPI_ILDR_STATUS 0x10120
+
+/* Sensors for PVT (Process Voltage Temperature) */
+#define WX_TS_EN 0x10304
+#define WX_TS_EN_ENA BIT(0)
+#define WX_TS_ALARM_THRE 0x1030C
+#define WX_TS_DALARM_THRE 0x10310
+#define WX_TS_INT_EN 0x10314
+#define WX_TS_INT_EN_DALARM_INT_EN BIT(1)
+#define WX_TS_INT_EN_ALARM_INT_EN BIT(0)
+#define WX_TS_ALARM_ST 0x10318
+#define WX_TS_ALARM_ST_DALARM BIT(1)
+#define WX_TS_ALARM_ST_ALARM BIT(0)
+
+/************************* Port Registers ************************************/
+/* port cfg Registers */
+#define WX_CFG_PORT_CTL 0x14400
+#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
+
+/*********************** Transmit DMA registers **************************/
+/* transmit global control */
+#define WX_TDM_CTL 0x18000
+/* TDM CTL BIT */
+#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
+
+/***************************** RDB registers *********************************/
+/* receive packet buffer */
+#define WX_RDB_PB_CTL 0x19000
+#define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */
+#define WX_RDB_PB_CTL_DISABLED BIT(0)
+/* statistic */
+#define WX_RDB_PFCMACDAL 0x19210
+#define WX_RDB_PFCMACDAH 0x19214
+
+/******************************* PSR Registers *******************************/
+/* psr control */
+#define WX_PSR_CTL 0x15000
+/* Header split receive */
+#define WX_PSR_CTL_SW_EN BIT(18)
+#define WX_PSR_CTL_RSC_ACK BIT(17)
+#define WX_PSR_CTL_RSC_DIS BIT(16)
+#define WX_PSR_CTL_PCSD BIT(13)
+#define WX_PSR_CTL_IPPCSE BIT(12)
+#define WX_PSR_CTL_BAM BIT(10)
+#define WX_PSR_CTL_UPE BIT(9)
+#define WX_PSR_CTL_MPE BIT(8)
+#define WX_PSR_CTL_MFE BIT(7)
+#define WX_PSR_CTL_MO_SHIFT 5
+#define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT)
+#define WX_PSR_CTL_TPE BIT(4)
+/* mcast/ucast overflow tbl */
+#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
+#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+
+/* Management */
+#define WX_PSR_MNG_FLEX_SEL 0x1582C
+#define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16))
+#define WX_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16))
+#define WX_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16))
+#define WX_PSR_LAN_FLEX_SEL 0x15B8C
+#define WX_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16))
+#define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16))
+#define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16))
+
+/* mac switcher */
+#define WX_PSR_MAC_SWC_AD_L 0x16200
+#define WX_PSR_MAC_SWC_AD_H 0x16204
+#define WX_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF))
+#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30)
+#define WX_PSR_MAC_SWC_AD_H_AV BIT(31)
+#define WX_PSR_MAC_SWC_VM_L 0x16208
+#define WX_PSR_MAC_SWC_VM_H 0x1620C
+#define WX_PSR_MAC_SWC_IDX 0x16210
+#define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU
+
+/************************************** MNG ********************************/
+#define WX_MNG_SWFW_SYNC 0x1E008
+#define WX_MNG_SWFW_SYNC_SW_MB BIT(2)
+#define WX_MNG_SWFW_SYNC_SW_FLASH BIT(3)
+#define WX_MNG_MBOX 0x1E100
+#define WX_MNG_MBOX_CTL 0x1E044
+#define WX_MNG_MBOX_CTL_SWRDY BIT(0)
+#define WX_MNG_MBOX_CTL_FWRDY BIT(2)
+
+/************************************* ETH MAC *****************************/
+#define WX_MAC_TX_CFG 0x11000
+#define WX_MAC_TX_CFG_TE BIT(0)
+#define WX_MAC_RX_CFG 0x11004
+#define WX_MAC_RX_CFG_RE BIT(0)
+#define WX_MAC_RX_CFG_JE BIT(8)
+#define WX_MAC_PKT_FLT 0x11008
+#define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */
+#define WX_MAC_RX_FLOW_CTRL 0x11090
+#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
+#define WX_MMC_CONTROL 0x11800
+#define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */
+
+/********************************* BAR registers ***************************/
+/* Interrupt Registers */
+#define WX_BME_CTL 0x12020
+#define WX_PX_MISC_IC 0x100
+#define WX_PX_IMS(_i) (0x140 + (_i) * 4)
+#define WX_PX_TRANSACTION_PENDING 0x168
+
+/* transmit DMA Registers */
+#define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40))
+/* Transmit Config masks */
+#define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */
+#define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */
+#define WX_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. wr-bk flushing */
+#define WX_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+#define WX_PX_TR_CFG_THRE_SHIFT 8
+
+/* Receive DMA Registers */
+#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40))
+/* PX_RR_CFG bit definitions */
+#define WX_PX_RR_CFG_RR_EN BIT(0)
+
+/* Max time (in microseconds) we wait for PCI Express master disable */
+#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000
+
+/****************** Manageability Host Interface defines ********************/
+#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
+#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
+
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 244
+#define FW_RESET_CMD 0xDF
+#define FW_RESET_LEN 0x2
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_RESERVED 0X0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+
+#define WX_SW_REGION_PTR 0x1C
+
+/* Host Interface Command Structures */
+struct wx_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct wx_hic_hdr2_req {
+ u8 cmd;
+ u8 buf_lenh;
+ u8 buf_lenl;
+ u8 checksum;
+};
+
+struct wx_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union wx_hic_hdr2 {
+ struct wx_hic_hdr2_req req;
+ struct wx_hic_hdr2_rsp rsp;
+};
+
+/* These need to be dword aligned */
+struct wx_hic_read_shadow_ram {
+ union wx_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct wx_hic_reset {
+ struct wx_hic_hdr hdr;
+ u16 lan_id;
+ u16 reset_type;
+};
+
+/* Bus parameters */
+struct wx_bus_info {
+ u8 func;
+ u16 device;
+};
+
+struct wx_thermal_sensor_data {
+ s16 temp;
+ s16 alarm_thresh;
+ s16 dalarm_thresh;
+};
+
+enum wx_mac_type {
+ wx_mac_unknown = 0,
+ wx_mac_sp,
+ wx_mac_em
+};
+
+struct wx_mac_info {
+ enum wx_mac_type type;
+ bool set_lben;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ s32 mc_filter_type;
+ u32 mcft_size;
+ u32 num_rar_entries;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+
+ u16 max_msix_vectors;
+ struct wx_thermal_sensor_data sensor;
+};
+
+enum wx_eeprom_type {
+ wx_eeprom_uninitialized = 0,
+ wx_eeprom_spi,
+ wx_flash,
+ wx_eeprom_none /* No NVM support */
+};
+
+struct wx_eeprom_info {
+ enum wx_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+ u16 sw_region_offset;
+};
+
+struct wx_addr_filter_info {
+ u32 num_mc_addrs;
+ u32 mta_in_use;
+ bool user_set_promisc;
+};
+
+enum wx_reset_type {
+ WX_LAN_RESET = 0,
+ WX_SW_RESET,
+ WX_GLOBAL_RESET
+};
+
+struct wx_hw {
+ u8 __iomem *hw_addr;
+ struct pci_dev *pdev;
+ struct wx_bus_info bus;
+ struct wx_mac_info mac;
+ struct wx_eeprom_info eeprom;
+ struct wx_addr_filter_info addr_ctrl;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u16 oem_ssid;
+ u16 oem_svid;
+ bool adapter_stopped;
+ enum wx_reset_type reset_type;
+};
+
+#define WX_INTR_ALL (~0ULL)
+
+/* register operations */
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+#define rd32a(a, reg, offset) ( \
+ rd32((a), (reg) + ((offset) << 2)))
+#define wr32a(a, reg, off, val) \
+ wr32((a), (reg) + ((off) << 2), (val))
+
+static inline u32
+rd32m(struct wx_hw *wxhw, u32 reg, u32 mask)
+{
+ u32 val;
+
+ val = rd32(wxhw, reg);
+ return val & mask;
+}
+
+static inline void
+wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field)
+{
+ u32 val;
+
+ val = rd32(wxhw, reg);
+ val = ((val & ~mask) | (field & mask));
+
+ wr32(wxhw, reg, val);
+}
+
+/* On some domestic CPU platforms, IO is not always synchronized with memory
+ * flushes; use readl() here to flush posted PCI reads and writes.
+ */
+#define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR)
+
+#define wx_err(wxhw, fmt, arg...) \
+ dev_err(&(wxhw)->pdev->dev, fmt, ##arg)
+
+#define wx_dbg(wxhw, fmt, arg...) \
+ dev_dbg(&(wxhw)->pdev->dev, fmt, ##arg)
+
+#endif /* _WX_TYPE_H_ */
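Everything in wx_hw.c is built on the rd32()/wr32() accessors and the wr32m() read-modify-write helper defined above. A minimal usage sketch, mirroring the masked writes in wx_disable_rx() and wx_reset_misc():

/* Toggle the MAC receive-enable bit with a masked write; illustrative only. */
static void example_toggle_mac_rx(struct wx_hw *wxhw, bool enable)
{
	wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE,
	      enable ? WX_MAC_RX_CFG_RE : 0);
}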
diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile
index 0baf75907496..391c2cbc1bb4 100644
--- a/drivers/net/ethernet/wangxun/ngbe/Makefile
+++ b/drivers/net/ethernet/wangxun/ngbe/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_NGBE) += ngbe.o
-ngbe-objs := ngbe_main.o
+ngbe-objs := ngbe_main.o ngbe_hw.o
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
index f5fa6e5238cc..af147ca8605c 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
@@ -11,12 +11,67 @@
#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+#define NGBE_ETH_LENGTH_OF_ADDRESS 6
+#define NGBE_MAX_MSIX_VECTORS 0x09
+#define NGBE_RAR_ENTRIES 32
+
+/* TX/RX descriptor defines */
+#define NGBE_DEFAULT_TXD 512 /* default ring size */
+#define NGBE_DEFAULT_TX_WORK 256
+#define NGBE_MAX_TXD 8192
+#define NGBE_MIN_TXD 128
+
+#define NGBE_DEFAULT_RXD 512 /* default ring size */
+#define NGBE_DEFAULT_RX_WORK 256
+#define NGBE_MAX_RXD 8192
+#define NGBE_MIN_RXD 128
+
+#define NGBE_MAC_STATE_DEFAULT 0x1
+#define NGBE_MAC_STATE_MODIFIED 0x2
+#define NGBE_MAC_STATE_IN_USE 0x4
+
+struct ngbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 state; /* bitmask */
+ u64 pools;
+};
+
/* board specific private data structure */
struct ngbe_adapter {
u8 __iomem *io_addr; /* Mainly for iounmap use */
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
+
+ /* structs defined in ngbe_hw.h */
+ struct ngbe_hw hw;
+ struct ngbe_mac_addr *mac_table;
+ u16 msg_enable;
+
+ /* Tx fast path data */
+ int num_tx_queues;
+ u16 tx_itr_setting;
+ u16 tx_work_limit;
+
+ /* Rx fast path data */
+ int num_rx_queues;
+ u16 rx_itr_setting;
+ u16 rx_work_limit;
+
+ int num_q_vectors; /* current number of q_vectors for device */
+ int max_q_vectors; /* upper limit of q_vectors for device */
+
+ u32 tx_ring_count;
+ u32 rx_ring_count;
+
+#define NGBE_MAX_RETA_ENTRIES 128
+ u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES];
+
+#define NGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+ u32 *rss_key;
+ u32 wol;
+
+ u16 bd_number;
};
extern char ngbe_driver_name[];
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
new file mode 100644
index 000000000000..0e3923b3737e
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "ngbe_type.h"
+#include "ngbe_hw.h"
+#include "ngbe.h"
+
+int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw)
+{
+ struct wx_hic_read_shadow_ram buffer;
+ struct wx_hw *wxhw = &hw->wxhw;
+ int status;
+ int tmp;
+
+ buffer.hdr.req.cmd = NGBE_FW_EEPROM_CHECKSUM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = 0;
+ buffer.hdr.req.checksum = NGBE_FW_CMD_DEFAULT_CHECKSUM;
+ /* convert offset from words to bytes */
+ buffer.address = 0;
+ /* one word */
+ buffer.length = 0;
+
+ status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, false);
+
+ if (status < 0)
+ return status;
+ tmp = rd32a(wxhw, WX_MNG_MBOX, 1);
+ if (tmp == NGBE_FW_CMD_ST_PASS)
+ return 0;
+ return -EIO;
+}
+
+static int ngbe_reset_misc(struct ngbe_hw *hw)
+{
+ struct wx_hw *wxhw = &hw->wxhw;
+
+ wx_reset_misc(wxhw);
+ if (hw->mac_type == ngbe_mac_type_rgmii)
+ wr32(wxhw, NGBE_MDIO_CLAUSE_SELECT, 0xF);
+ if (hw->gpio_ctrl) {
+ /* gpio0 is used for power on/off control */
+ wr32(wxhw, NGBE_GPIO_DDR, 0x1);
+ wr32(wxhw, NGBE_GPIO_DR, NGBE_GPIO_DR_0);
+ }
+ return 0;
+}
+
+/**
+ * ngbe_reset_hw - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ * reset.
+ **/
+int ngbe_reset_hw(struct ngbe_hw *hw)
+{
+ struct wx_hw *wxhw = &hw->wxhw;
+ int status = 0;
+ u32 reset = 0;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = wx_stop_adapter(wxhw);
+ if (status != 0)
+ return status;
+ reset = WX_MIS_RST_LAN_RST(wxhw->bus.func);
+ wr32(wxhw, WX_MIS_RST, reset | rd32(wxhw, WX_MIS_RST));
+ ngbe_reset_misc(hw);
+
+ /* Store the permanent mac address */
+ wx_get_mac_addr(wxhw, wxhw->mac.perm_addr);
+
+ /* reset num_rar_entries to 128 */
+ wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES;
+ wx_init_rx_addrs(wxhw);
+ pci_set_master(wxhw->pdev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
new file mode 100644
index 000000000000..42476a3fe57c
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+ */
+
+#ifndef _NGBE_HW_H_
+#define _NGBE_HW_H_
+
+int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw);
+int ngbe_reset_hw(struct ngbe_hw *hw);
+#endif /* _NGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 7674cb6e5700..f0b24366da18 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -8,7 +8,12 @@
#include <linux/string.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "ngbe_type.h"
+#include "ngbe_hw.h"
#include "ngbe.h"
char ngbe_driver_name[] = "ngbe";
@@ -34,6 +39,247 @@ static const struct pci_device_id ngbe_pci_tbl[] = {
{ .device = 0 }
};
+static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, u8 *addr)
+{
+ struct ngbe_hw *hw = &adapter->hw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].pools = 1ULL;
+ adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT |
+ NGBE_MAC_STATE_IN_USE);
+ wx_set_rar(&hw->wxhw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+}
+
+/**
+ * ngbe_init_type_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ **/
+static void ngbe_init_type_code(struct ngbe_hw *hw)
+{
+ int wol_mask = 0, ncsi_mask = 0;
+ struct wx_hw *wxhw = &hw->wxhw;
+ u16 type_mask = 0;
+
+ wxhw->mac.type = wx_mac_em;
+ type_mask = (u16)(wxhw->subsystem_device_id & NGBE_OEM_MASK);
+ ncsi_mask = wxhw->subsystem_device_id & NGBE_NCSI_MASK;
+ wol_mask = wxhw->subsystem_device_id & NGBE_WOL_MASK;
+
+ switch (type_mask) {
+ case NGBE_SUBID_M88E1512_SFP:
+ case NGBE_SUBID_LY_M88E1512_SFP:
+ hw->phy.type = ngbe_phy_m88e1512_sfi;
+ break;
+ case NGBE_SUBID_M88E1512_RJ45:
+ hw->phy.type = ngbe_phy_m88e1512;
+ break;
+ case NGBE_SUBID_M88E1512_MIX:
+ hw->phy.type = ngbe_phy_m88e1512_unknown;
+ break;
+ case NGBE_SUBID_YT8521S_SFP:
+ case NGBE_SUBID_YT8521S_SFP_GPIO:
+ case NGBE_SUBID_LY_YT8521S_SFP:
+ hw->phy.type = ngbe_phy_yt8521s_sfi;
+ break;
+ case NGBE_SUBID_INTERNAL_YT8521S_SFP:
+ case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO:
+ hw->phy.type = ngbe_phy_internal_yt8521s_sfi;
+ break;
+ case NGBE_SUBID_RGMII_FPGA:
+ case NGBE_SUBID_OCP_CARD:
+ fallthrough;
+ default:
+ hw->phy.type = ngbe_phy_internal;
+ break;
+ }
+
+ if (hw->phy.type == ngbe_phy_internal ||
+ hw->phy.type == ngbe_phy_internal_yt8521s_sfi)
+ hw->mac_type = ngbe_mac_type_mdi;
+ else
+ hw->mac_type = ngbe_mac_type_rgmii;
+
+ hw->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
+ hw->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
+ type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0;
+
+ switch (type_mask) {
+ case NGBE_SUBID_LY_YT8521S_SFP:
+ case NGBE_SUBID_LY_M88E1512_SFP:
+ case NGBE_SUBID_YT8521S_SFP_GPIO:
+ case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO:
+ hw->gpio_ctrl = 1;
+ break;
+ default:
+ hw->gpio_ctrl = 0;
+ break;
+ }
+}
+
+/**
+ * ngbe_init_rss_key - Initialize adapter RSS key
+ * @adapter: device handle
+ *
+ * Allocates and initializes the RSS key if it is not allocated.
+ **/
+static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter)
+{
+ u32 *rss_key;
+
+ if (!adapter->rss_key) {
+ rss_key = kzalloc(NGBE_RSS_KEY_SIZE, GFP_KERNEL);
+ if (unlikely(!rss_key))
+ return -ENOMEM;
+
+ netdev_rss_key_fill(rss_key, NGBE_RSS_KEY_SIZE);
+ adapter->rss_key = rss_key;
+ }
+
+ return 0;
+}
+
+/**
+ * ngbe_sw_init - Initialize general software structures
+ * @adapter: board private structure to initialize
+ **/
+static int ngbe_sw_init(struct ngbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ngbe_hw *hw = &adapter->hw;
+ struct wx_hw *wxhw = &hw->wxhw;
+ u16 msix_count = 0;
+ int err = 0;
+
+ wxhw->hw_addr = adapter->io_addr;
+ wxhw->pdev = pdev;
+
+ /* PCI config space info */
+ err = wx_sw_init(wxhw);
+ if (err < 0) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Read of internal subsystem device id failed\n");
+ return err;
+ }
+
+ /* mac type, phy type, oem type */
+ ngbe_init_type_code(hw);
+
+ wxhw->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
+ wxhw->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
+ wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES;
+ /* Set common capability flags and settings */
+ adapter->max_q_vectors = NGBE_MAX_MSIX_VECTORS;
+
+ err = wx_get_pcie_msix_counts(wxhw, &msix_count, NGBE_MAX_MSIX_VECTORS);
+ if (err)
+ dev_err(&pdev->dev, "MSI-X is not supported\n");
+ wxhw->mac.max_msix_vectors = msix_count;
+
+ adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries,
+ sizeof(struct ngbe_mac_addr),
+ GFP_KERNEL);
+ if (!adapter->mac_table) {
+ dev_err(&pdev->dev, "mac_table allocation failed: %d\n", err);
+ return -ENOMEM;
+ }
+
+ if (ngbe_init_rss_key(adapter))
+ return -ENOMEM;
+
+ /* enable itr by default in dynamic mode */
+ adapter->rx_itr_setting = 1;
+ adapter->tx_itr_setting = 1;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = NGBE_DEFAULT_TXD;
+ adapter->rx_ring_count = NGBE_DEFAULT_RXD;
+
+ /* set default work limits */
+ adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK;
+ adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+
+ return 0;
+}
+
+static void ngbe_down(struct ngbe_adapter *adapter)
+{
+ netif_carrier_off(adapter->netdev);
+ netif_tx_disable(adapter->netdev);
+}
+
+/**
+ * ngbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).
+ **/
+static int ngbe_open(struct net_device *netdev)
+{
+ struct ngbe_adapter *adapter = netdev_priv(netdev);
+ struct ngbe_hw *hw = &adapter->hw;
+ struct wx_hw *wxhw = &hw->wxhw;
+
+ wx_control_hw(wxhw, true);
+
+ return 0;
+}
+
+/**
+ * ngbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ngbe_close(struct net_device *netdev)
+{
+ struct ngbe_adapter *adapter = netdev_priv(netdev);
+
+ ngbe_down(adapter);
+ wx_control_hw(&adapter->hw.wxhw, false);
+
+ return 0;
+}
+
+static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ngbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ngbe_set_mac(struct net_device *netdev, void *p)
+{
+ struct ngbe_adapter *adapter = netdev_priv(netdev);
+ struct wx_hw *wxhw = &adapter->hw.wxhw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+ memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ ngbe_mac_set_default_filter(adapter, wxhw->mac.addr);
+
+ return 0;
+}
+
static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -41,13 +287,22 @@ static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
+ rtnl_lock();
+ if (netif_running(netdev))
+ ngbe_down(adapter);
+ rtnl_unlock();
+ wx_control_hw(&adapter->hw.wxhw, false);
+
pci_disable_device(pdev);
}
static void ngbe_shutdown(struct pci_dev *pdev)
{
+ struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
bool wake;
+ wake = !!adapter->wol;
+
ngbe_dev_shutdown(pdev, &wake);
if (system_state == SYSTEM_POWER_OFF) {
@@ -56,6 +311,14 @@ static void ngbe_shutdown(struct pci_dev *pdev)
}
}
+static const struct net_device_ops ngbe_netdev_ops = {
+ .ndo_open = ngbe_open,
+ .ndo_stop = ngbe_close,
+ .ndo_start_xmit = ngbe_xmit_frame,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ngbe_set_mac,
+};
+
/**
* ngbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -71,7 +334,14 @@ static int ngbe_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
struct ngbe_adapter *adapter = NULL;
+ struct ngbe_hw *hw = NULL;
+ struct wx_hw *wxhw = NULL;
struct net_device *netdev;
+ u32 e2rom_cksum_cap = 0;
+ static int func_nums;
+ u16 e2rom_ver = 0;
+ u32 etrack_id = 0;
+ u32 saved_ver = 0;
int err;
err = pci_enable_device_mem(pdev);
@@ -111,6 +381,9 @@ static int ngbe_probe(struct pci_dev *pdev,
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ hw = &adapter->hw;
+ wxhw = &hw->wxhw;
+ adapter->msg_enable = BIT(3) - 1;
adapter->io_addr = devm_ioremap(&pdev->dev,
pci_resource_start(pdev, 0),
@@ -120,12 +393,101 @@ static int ngbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ netdev->netdev_ops = &ngbe_netdev_ops;
+
netdev->features |= NETIF_F_HIGHDMA;
+ adapter->bd_number = func_nums;
+ /* setup the private structure */
+ err = ngbe_sw_init(adapter);
+ if (err)
+ goto err_free_mac_table;
+
+ /* check if flash load is done after hw power up */
+ err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PERST);
+ if (err)
+ goto err_free_mac_table;
+ err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PWRRST);
+ if (err)
+ goto err_free_mac_table;
+
+ err = wx_mng_present(wxhw);
+ if (err) {
+ dev_err(&pdev->dev, "Management capability is not present\n");
+ goto err_free_mac_table;
+ }
+
+ err = ngbe_reset_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "HW Init failed: %d\n", err);
+ goto err_free_mac_table;
+ }
+
+ if (wxhw->bus.func == 0) {
+ wr32(wxhw, NGBE_CALSUM_CAP_STATUS, 0x0);
+ wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, 0x0);
+ } else {
+ e2rom_cksum_cap = rd32(wxhw, NGBE_CALSUM_CAP_STATUS);
+ saved_ver = rd32(wxhw, NGBE_EEPROM_VERSION_STORE_REG);
+ }
+
+ wx_init_eeprom_params(wxhw);
+ if (wxhw->bus.func == 0 || e2rom_cksum_cap == 0) {
+ /* make sure the EEPROM is ready */
+ err = ngbe_eeprom_chksum_hostif(hw);
+ if (err) {
+ dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_free_mac_table;
+ }
+ }
+
+ adapter->wol = 0;
+ if (hw->wol_enabled)
+ adapter->wol = NGBE_PSR_WKUP_CTL_MAG;
+
+ hw->wol_enabled = !!(adapter->wol);
+ wr32(wxhw, NGBE_PSR_WKUP_CTL, adapter->wol);
+
+ device_set_wakeup_enable(&pdev->dev, adapter->wol);
+
+ /* Save off EEPROM version number and Option Rom version which
+ * together make a unique identifier for the EEPROM
+ */
+ if (saved_ver) {
+ etrack_id = saved_ver;
+ } else {
+ wx_read_ee_hostif(wxhw,
+ wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H,
+ &e2rom_ver);
+ etrack_id = e2rom_ver << 16;
+ wx_read_ee_hostif(wxhw,
+ wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L,
+ &e2rom_ver);
+ etrack_id |= e2rom_ver;
+ wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
+ }
+
+ eth_hw_addr_set(netdev, wxhw->mac.perm_addr);
+ ngbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
pci_set_drvdata(pdev, adapter);
+ netif_info(adapter, probe, netdev,
+ "PHY: %s, PBA No: Wang Xun GbE Family Controller\n",
+ hw->phy.type == ngbe_phy_internal ? "Internal" : "External");
+ netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr);
+
return 0;
+err_register:
+ wx_control_hw(wxhw, false);
+err_free_mac_table:
+ kfree(adapter->mac_table);
err_pci_release_regions:
pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
@@ -146,9 +508,15 @@ err_pci_disable_dev:
**/
static void ngbe_remove(struct pci_dev *pdev)
{
+ struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ netdev = adapter->netdev;
+ unregister_netdev(netdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(adapter->mac_table);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 26e776c3539a..39f6c03f1a54 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -8,11 +8,6 @@
#include <linux/netdevice.h>
/************ NGBE_register.h ************/
-/* Vendor ID */
-#ifndef PCI_VENDOR_ID_WANGXUN
-#define PCI_VENDOR_ID_WANGXUN 0x8088
-#endif
-
/* Device IDs */
#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100
#define NGBE_DEV_ID_EM_WX1860A2 0x0101
@@ -47,4 +42,98 @@
#define NGBE_WOL_SUP 0x4000
#define NGBE_WOL_MASK 0x4000
+/**************** EM Registers ****************************/
+/* chip control Registers */
+#define NGBE_MIS_PRB_CTL 0x10010
+/* FMGR Registers */
+#define NGBE_SPI_ILDR_STATUS 0x10120
+#define NGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */
+#define NGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */
+#define NGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */
+
+/* Checksum and EEPROM pointers */
+#define NGBE_CALSUM_COMMAND 0xE9
+#define NGBE_CALSUM_CAP_STATUS 0x10224
+#define NGBE_EEPROM_VERSION_STORE_REG 0x1022C
+#define NGBE_SAN_MAC_ADDR_PTR 0x18
+#define NGBE_DEVICE_CAPS 0x1C
+#define NGBE_EEPROM_VERSION_L 0x1D
+#define NGBE_EEPROM_VERSION_H 0x1E
+
+/* Media-dependent registers. */
+#define NGBE_MDIO_CLAUSE_SELECT 0x11220
+
+/* GPIO Registers */
+#define NGBE_GPIO_DR 0x14800
+#define NGBE_GPIO_DDR 0x14804
+/* GPIO bit */
+#define NGBE_GPIO_DR_0 BIT(0) /* SDP0 Data Value */
+#define NGBE_GPIO_DR_1 BIT(1) /* SDP1 Data Value */
+#define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */
+#define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */
+
+/* Wake up registers */
+#define NGBE_PSR_WKUP_CTL 0x15B80
+/* Wake Up Filter Control Bit */
+#define NGBE_PSR_WKUP_CTL_LNKC BIT(0) /* Link Status Change Wakeup Enable*/
+#define NGBE_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */
+#define NGBE_PSR_WKUP_CTL_EX BIT(2) /* Directed Exact Wakeup Enable */
+#define NGBE_PSR_WKUP_CTL_MC BIT(3) /* Directed Multicast Wakeup Enable*/
+#define NGBE_PSR_WKUP_CTL_BC BIT(4) /* Broadcast Wakeup Enable */
+#define NGBE_PSR_WKUP_CTL_ARP BIT(5) /* ARP Request Packet Wakeup Enable*/
+#define NGBE_PSR_WKUP_CTL_IPV4 BIT(6) /* Directed IPv4 Pkt Wakeup Enable */
+#define NGBE_PSR_WKUP_CTL_IPV6 BIT(7) /* Directed IPv6 Pkt Wakeup Enable */
+
+#define NGBE_FW_EEPROM_CHECKSUM_CMD 0xE9
+#define NGBE_FW_NVM_DATA_OFFSET 3
+#define NGBE_FW_CMD_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define NGBE_FW_CMD_ST_PASS 0x80658383
+#define NGBE_FW_CMD_ST_FAIL 0x70657376
+
+enum ngbe_phy_type {
+ ngbe_phy_unknown = 0,
+ ngbe_phy_none,
+ ngbe_phy_internal,
+ ngbe_phy_m88e1512,
+ ngbe_phy_m88e1512_sfi,
+ ngbe_phy_m88e1512_unknown,
+ ngbe_phy_yt8521s,
+ ngbe_phy_yt8521s_sfi,
+ ngbe_phy_internal_yt8521s_sfi,
+ ngbe_phy_generic
+};
+
+enum ngbe_media_type {
+ ngbe_media_type_unknown = 0,
+ ngbe_media_type_fiber,
+ ngbe_media_type_copper,
+ ngbe_media_type_backplane,
+};
+
+enum ngbe_mac_type {
+ ngbe_mac_type_unknown = 0,
+ ngbe_mac_type_mdi,
+ ngbe_mac_type_rgmii
+};
+
+struct ngbe_phy_info {
+ enum ngbe_phy_type type;
+ enum ngbe_media_type media_type;
+
+ u32 addr;
+ u32 id;
+
+ bool reset_if_overtemp;
+};
+
+struct ngbe_hw {
+ struct wx_hw wxhw;
+ struct ngbe_phy_info phy;
+ enum ngbe_mac_type mac_type;
+
+ bool wol_enabled;
+ bool ncsi_enabled;
+ bool gpio_ctrl;
+};
#endif /* _NGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 431303ca75b4..78484c58b78b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_TXGBE) += txgbe.o
-txgbe-objs := txgbe_main.o
+txgbe-objs := txgbe_main.o \
+ txgbe_hw.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
index 38ddbde0ed0f..19e61377bd00 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -4,19 +4,38 @@
#ifndef _TXGBE_H_
#define _TXGBE_H_
-#include "txgbe_type.h"
-
#define TXGBE_MAX_FDIR_INDICES 63
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+#define TXGBE_SP_MAX_TX_QUEUES 128
+#define TXGBE_SP_MAX_RX_QUEUES 128
+#define TXGBE_SP_RAR_ENTRIES 128
+#define TXGBE_SP_MC_TBL_SIZE 128
+
+struct txgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 state; /* bitmask */
+ u64 pools;
+};
+
+#define TXGBE_MAC_STATE_DEFAULT 0x1
+#define TXGBE_MAC_STATE_MODIFIED 0x2
+#define TXGBE_MAC_STATE_IN_USE 0x4
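+/* DEFAULT marks the permanent address kept in RAR 0, IN_USE marks an
+ * occupied table slot, and MODIFIED flags entries that still need to be
+ * written to hardware by txgbe_sync_mac_table()
+ */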
+
/* board specific private data structure */
struct txgbe_adapter {
u8 __iomem *io_addr; /* Mainly for iounmap use */
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
+
+ /* structs defined in txgbe_type.h */
+ struct txgbe_hw hw;
+ u16 msg_enable;
+ struct txgbe_mac_addr *mac_table;
+ char eeprom_id[32];
};
extern char txgbe_driver_name[];
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
new file mode 100644
index 000000000000..167f7ff73192
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/string.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
+#include "txgbe.h"
+
+/**
+ * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ * and saves off the threshold and location values into mac.thermal_sensor_data
+ **/
+static void txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw)
+{
+ struct wx_hw *wxhw = &hw->wxhw;
+ struct wx_thermal_sensor_data *data = &wxhw->mac.sensor;
+
+ memset(data, 0, sizeof(struct wx_thermal_sensor_data));
+
+ /* Only support thermal sensors attached to SP physical port 0 */
+ if (wxhw->bus.func)
+ return;
+
+ wr32(wxhw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD);
+
+ wr32(wxhw, WX_TS_INT_EN,
+ WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN);
+ wr32(wxhw, WX_TS_EN, WX_TS_EN_ENA);
+
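+ /* the raw values written below are presumably the sensor codes that
+  * correspond to the 100 and 90 degree thresholds recorded here
+  */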
+ data->alarm_thresh = 100;
+ wr32(wxhw, WX_TS_ALARM_THRE, 677);
+ data->dalarm_thresh = 90;
+ wr32(wxhw, WX_TS_DALARM_THRE, 614);
+}
+
+/**
+ * txgbe_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ u16 pba_ptr, offset, length, data;
+ struct wx_hw *wxhw = &hw->wxhw;
+ int ret_val;
+
+ if (!pba_num) {
+ wx_err(wxhw, "PBA string buffer was null\n");
+ return -EINVAL;
+ }
+
+ ret_val = wx_read_ee_hostif(wxhw,
+ wxhw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR,
+ &data);
+ if (ret_val != 0) {
+ wx_err(wxhw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = wx_read_ee_hostif(wxhw,
+ wxhw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR,
+ &pba_ptr);
+ if (ret_val != 0) {
+ wx_err(wxhw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if data is not the pointer guard, the PBA must be in legacy format,
+  * which means pba_ptr is actually our second data word for the PBA
+  * number and we can decode it into an ASCII string
+ */
+ if (data != TXGBE_PBANUM_PTR_GUARD) {
+ wx_err(wxhw, "NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ wx_err(wxhw, "PBA string buffer too small\n");
+ return -ENOMEM;
+ }
+
+ /* extract hex string from data and pba_ptr */
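+ /* e.g. data = 0x1234 with pba_ptr = 0x5678 yields "123456-078" once
+  * the hex-to-char conversion below has run (illustrative values)
+  */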
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ ret_val = wx_read_ee_hostif(wxhw, pba_ptr, &length);
+ if (ret_val != 0) {
+ wx_err(wxhw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ wx_err(wxhw, "NVM PBA number section invalid length\n");
+ return -EINVAL;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ wx_err(wxhw, "PBA string buffer too small\n");
+ return -ENOMEM;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = wx_read_ee_hostif(wxhw, pba_ptr + offset, &data);
+ if (ret_val != 0) {
+ wx_err(wxhw, "NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return 0;
+}
+
+/**
+ * txgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to checksum
+ *
+ * Returns a negative error code on error
+ **/
+static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
+{
+ struct wx_hw *wxhw = &hw->wxhw;
+ u16 *eeprom_ptrs = NULL;
+ u32 buffer_size = 0;
+ u16 *buffer = NULL;
+ u16 *local_buffer;
+ int status;
+ u16 i;
+
+ wx_init_eeprom_params(wxhw);
+
+ if (!buffer) {
+ eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16),
+ GFP_KERNEL);
+ if (!eeprom_ptrs)
+ return -ENOMEM;
+ /* Read pointer area */
+ status = wx_read_ee_hostif_buffer(wxhw, 0,
+ TXGBE_EEPROM_LAST_WORD,
+ eeprom_ptrs);
+ if (status != 0) {
+ wx_err(wxhw, "Failed to read EEPROM image\n");
+ kvfree(eeprom_ptrs);
+ return status;
+ }
+ local_buffer = eeprom_ptrs;
+ } else {
+ if (buffer_size < TXGBE_EEPROM_LAST_WORD)
+ return -EFAULT;
+ local_buffer = buffer;
+ }
+
+ for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
+ if (i != wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
+ *checksum += local_buffer[i];
+
+ if (eeprom_ptrs)
+ kvfree(eeprom_ptrs);
+
+ if (*checksum > TXGBE_EEPROM_SUM)
+ return -EINVAL;
+
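+ /* the stored checksum word is defined so that the sum of every EEPROM
+  * word, including the checksum itself, equals TXGBE_EEPROM_SUM
+  */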
+ *checksum = TXGBE_EEPROM_SUM - *checksum;
+
+ return 0;
+}
+
+/**
+ * txgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
+{
+ struct wx_hw *wxhw = &hw->wxhw;
+ u16 read_checksum = 0;
+ u16 checksum;
+ int status;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = wx_read_ee_hostif(wxhw, 0, &checksum);
+ if (status) {
+ wx_err(wxhw, "EEPROM read failed\n");
+ return status;
+ }
+
+ checksum = 0;
+ status = txgbe_calc_eeprom_checksum(hw, &checksum);
+ if (status != 0)
+ return status;
+
+ status = wx_read_ee_hostif(wxhw, wxhw->eeprom.sw_region_offset +
+ TXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (status != 0)
+ return status;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ status = -EIO;
+ wx_err(wxhw, "Invalid EEPROM checksum\n");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+static void txgbe_reset_misc(struct txgbe_hw *hw)
+{
+ struct wx_hw *wxhw = &hw->wxhw;
+
+ wx_reset_misc(wxhw);
+ txgbe_init_thermal_sensor_thresh(hw);
+}
+
+/**
+ * txgbe_reset_hw - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ * reset.
+ **/
+int txgbe_reset_hw(struct txgbe_hw *hw)
+{
+ struct wx_hw *wxhw = &hw->wxhw;
+ int status;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = wx_stop_adapter(wxhw);
+ if (status != 0)
+ return status;
+
+ if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP)))
+ wx_reset_hostif(wxhw);
+
+ usleep_range(10, 100);
+
+ status = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wxhw->bus.func));
+ if (status != 0)
+ return status;
+
+ txgbe_reset_misc(hw);
+
+ /* Store the permanent mac address */
+ wx_get_mac_addr(wxhw, wxhw->mac.perm_addr);
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx_init_rx_addrs(wxhw);
+
+ pci_set_master(wxhw->pdev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
new file mode 100644
index 000000000000..6a751a69177b
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_HW_H_
+#define _TXGBE_HW_H_
+
+int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val);
+int txgbe_reset_hw(struct txgbe_hw *hw);
+
+#endif /* _TXGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index d3b9f73ecba4..36780e7f05b7 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -8,7 +8,12 @@
#include <linux/string.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
#include "txgbe.h"
char txgbe_driver_name[] = "txgbe";
@@ -30,13 +35,305 @@ static const struct pci_device_id txgbe_pci_tbl[] = {
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
+{
+ struct pci_dev *pdev;
+
+ pdev = adapter->pdev;
+ pcie_print_link_status(pdev);
+}
+
+/**
+ * txgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the required GT/s of PCIe bandwidth necessary for optimal
+ * performance.
+ **/
+static int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
+{
+ struct pci_dev *entry, *pdev = adapter->pdev;
+ int physfns = 0;
+
+ list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
+ /* When the devices on the bus don't all match our device ID,
+ * we can't reliably determine the correct number of
+ * functions. This can occur if a function has been direct
+ * attached to a virtual machine using VT-d.
+ */
+ if (entry->vendor != pdev->vendor ||
+ entry->device != pdev->device)
+ return -EINVAL;
+
+ physfns++;
+ }
+
+ return physfns;
+}
+
+static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ struct wx_hw *wxhw = &hw->wxhw;
+ int i;
+
+ for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) {
+ wx_set_rar(wxhw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wxhw, i);
+ }
+ adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+/* this function destroys the first RAR entry */
+static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter,
+ u8 *addr)
+{
+ struct wx_hw *wxhw = &adapter->hw.wxhw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].pools = 1ULL;
+ adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT |
+ TXGBE_MAC_STATE_IN_USE);
+ wx_set_rar(wxhw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+}
+
+static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
+{
+ struct wx_hw *wxhw = &adapter->hw.wxhw;
+ u32 i;
+
+ for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].pools = 0;
+ }
+ txgbe_sync_mac_table(adapter);
+}
+
+static int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool)
+{
+ struct wx_hw *wxhw = &adapter->hw.wxhw;
+ u32 i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* search table for addr, if found, set to 0 and sync */
+ for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr)) {
+ if (adapter->mac_table[i].pools & (1ULL << pool)) {
+ adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
+ adapter->mac_table[i].pools &= ~(1ULL << pool);
+ txgbe_sync_mac_table(adapter);
+ }
+ return 0;
+ }
+
+ if (adapter->mac_table[i].pools != (1 << pool))
+ continue;
+ if (!ether_addr_equal(addr, adapter->mac_table[i].addr))
+ continue;
+
+ adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].pools = 0;
+ txgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static void txgbe_up_complete(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ struct wx_hw *wxhw = &hw->wxhw;
+
+ wx_control_hw(wxhw, true);
+}
+
+static void txgbe_reset(struct txgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct txgbe_hw *hw = &adapter->hw;
+ u8 old_addr[ETH_ALEN];
+ int err;
+
+ err = txgbe_reset_hw(hw);
+ if (err != 0)
+ dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+
+ /* do not flush user set addresses */
+ memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+ txgbe_flush_sw_mac_table(adapter);
+ txgbe_mac_set_default_filter(adapter, old_addr);
+}
+
+static void txgbe_disable_device(struct txgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct wx_hw *wxhw = &adapter->hw.wxhw;
+
+ wx_disable_pcie_master(wxhw);
+ /* disable receives */
+ wx_disable_rx(wxhw);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ if (wxhw->bus.func < 2)
+ wr32m(wxhw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wxhw->bus.func), 0);
+ else
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid bus lan id %d\n",
+ __func__, wxhw->bus.func);
+
+ if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
+ /* disable mac transmitter */
+ wr32m(wxhw, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+ }
+
+ /* Disable the Tx DMA engine */
+ wr32m(wxhw, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
+}
+
+static void txgbe_down(struct txgbe_adapter *adapter)
+{
+ txgbe_disable_device(adapter);
+ txgbe_reset(adapter);
+}
+
+/**
+ * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter)
+ * @adapter: board private structure to initialize
+ **/
+static int txgbe_sw_init(struct txgbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct txgbe_hw *hw = &adapter->hw;
+ struct wx_hw *wxhw = &hw->wxhw;
+ int err;
+
+ wxhw->hw_addr = adapter->io_addr;
+ wxhw->pdev = pdev;
+
+ /* PCI config space info */
+ err = wx_sw_init(wxhw);
+ if (err < 0) {
+ netif_err(adapter, probe, adapter->netdev,
+ "read of internal subsystem device id failed\n");
+ return err;
+ }
+
+ switch (wxhw->device_id) {
+ case TXGBE_DEV_ID_SP1000:
+ case TXGBE_DEV_ID_WX1820:
+ wxhw->mac.type = wx_mac_sp;
+ break;
+ default:
+ wxhw->mac.type = wx_mac_unknown;
+ break;
+ }
+
+ wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wxhw->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
+ wxhw->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
+ wxhw->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
+
+ adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries,
+ sizeof(struct txgbe_mac_addr),
+ GFP_KERNEL);
+ if (!adapter->mac_table) {
+ netif_err(adapter, probe, adapter->netdev,
+ "mac_table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).
+ **/
+static int txgbe_open(struct net_device *netdev)
+{
+ struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+ txgbe_up_complete(adapter);
+
+ return 0;
+}
+
+/**
+ * txgbe_close_suspend - actions necessary to both suspend and close flows
+ * @adapter: the private adapter struct
+ *
+ * This function should contain the necessary work common to both suspending
+ * and closing of the device.
+ */
+static void txgbe_close_suspend(struct txgbe_adapter *adapter)
+{
+ txgbe_disable_device(adapter);
+}
+
+/**
+ * txgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int txgbe_close(struct net_device *netdev)
+{
+ struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+ txgbe_down(adapter);
+ wx_control_hw(&adapter->hw.wxhw, false);
+
+ return 0;
+}
+
static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ struct txgbe_hw *hw = &adapter->hw;
+ struct wx_hw *wxhw = &hw->wxhw;
netif_device_detach(netdev);
+ rtnl_lock();
+ if (netif_running(netdev))
+ txgbe_close_suspend(adapter);
+ rtnl_unlock();
+
+ wx_control_hw(wxhw, false);
+
pci_disable_device(pdev);
}
@@ -52,6 +349,47 @@ static void txgbe_shutdown(struct pci_dev *pdev)
}
}
+static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ return NETDEV_TX_OK;
+}
+
+/**
+ * txgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int txgbe_set_mac(struct net_device *netdev, void *p)
+{
+ struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct wx_hw *wxhw = &adapter->hw.wxhw;
+ struct sockaddr *addr = p;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, addr);
+ if (retval)
+ return retval;
+
+ txgbe_del_mac_filter(adapter, wxhw->mac.addr, 0);
+ eth_hw_addr_set(netdev, addr->sa_data);
+ memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ txgbe_mac_set_default_filter(adapter, wxhw->mac.addr);
+
+ return 0;
+}
+
+static const struct net_device_ops txgbe_netdev_ops = {
+ .ndo_open = txgbe_open,
+ .ndo_stop = txgbe_close,
+ .ndo_start_xmit = txgbe_xmit_frame,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = txgbe_set_mac,
+};
+
/**
* txgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -67,8 +405,16 @@ static int txgbe_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
struct txgbe_adapter *adapter = NULL;
+ struct txgbe_hw *hw = NULL;
+ struct wx_hw *wxhw = NULL;
struct net_device *netdev;
- int err;
+ int err, expected_gts;
+
+ u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
+ u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
+ u16 build = 0, major = 0, patch = 0;
+ u8 part_str[TXGBE_PBANUM_LENGTH];
+ u32 etrack_id = 0;
err = pci_enable_device_mem(pdev);
if (err)
@@ -107,6 +453,9 @@ static int txgbe_probe(struct pci_dev *pdev,
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ hw = &adapter->hw;
+ wxhw = &hw->wxhw;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
adapter->io_addr = devm_ioremap(&pdev->dev,
pci_resource_start(pdev, 0),
@@ -116,12 +465,118 @@ static int txgbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ netdev->netdev_ops = &txgbe_netdev_ops;
+
+ /* setup the private structure */
+ err = txgbe_sw_init(adapter);
+ if (err)
+ goto err_free_mac_table;
+
+ /* check if flash load is done after hw power up */
+ err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PERST);
+ if (err)
+ goto err_free_mac_table;
+ err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PWRRST);
+ if (err)
+ goto err_free_mac_table;
+
+ err = wx_mng_present(wxhw);
+ if (err) {
+ dev_err(&pdev->dev, "Management capability is not present\n");
+ goto err_free_mac_table;
+ }
+
+ err = txgbe_reset_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "HW Init failed: %d\n", err);
+ goto err_free_mac_table;
+ }
+
netdev->features |= NETIF_F_HIGHDMA;
+ /* make sure the EEPROM is good */
+ err = txgbe_validate_eeprom_checksum(hw, NULL);
+ if (err != 0) {
+ dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+ wr32(wxhw, WX_MIS_RST, WX_MIS_RST_SW_RST);
+ err = -EIO;
+ goto err_free_mac_table;
+ }
+
+ eth_hw_addr_set(netdev, wxhw->mac.perm_addr);
+ txgbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr);
+
+ /* Save off EEPROM version number and Option Rom version which
+ * together make a unique identifier for the EEPROM
+ */
+ wx_read_ee_hostif(wxhw,
+ wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
+ &eeprom_verh);
+ wx_read_ee_hostif(wxhw,
+ wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
+ &eeprom_verl);
+ etrack_id = (eeprom_verh << 16) | eeprom_verl;
+
+ wx_read_ee_hostif(wxhw,
+ wxhw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
+ &offset);
+
+ /* Make sure offset to iSCSI boot config block is valid */
+ if (!(offset == 0x0) && !(offset == 0xffff)) {
+ wx_read_ee_hostif(wxhw, offset + 0x84, &eeprom_cfg_blkh);
+ wx_read_ee_hostif(wxhw, offset + 0x83, &eeprom_cfg_blkl);
+
+ /* Only display Option ROM version if it exists */
+ if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
+ major = eeprom_cfg_blkl >> 8;
+ build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
+ patch = eeprom_cfg_blkh & 0x00ff;
+
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x, %d.%d.%d", etrack_id, major, build,
+ patch);
+ } else {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", etrack_id);
+ }
+ } else {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", etrack_id);
+ }
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_release_hw;
+
pci_set_drvdata(pdev, adapter);
+ /* calculate the expected PCIe bandwidth required for optimal
+ * performance. Note that some older parts will never have enough
+ * bandwidth due to being older generation PCIe parts. We clamp these
+ * parts to ensure that no warning is displayed, as this could confuse
+ * users otherwise.
+ */
+ expected_gts = txgbe_enumerate_functions(adapter) * 10;
+
+ /* don't check link if we failed to enumerate functions */
+ if (expected_gts > 0)
+ txgbe_check_minimum_link(adapter);
+ else
+ dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");
+
+ /* First try to read PBA as a string */
+ err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH);
+
+ netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr);
+
return 0;
+err_release_hw:
+ wx_control_hw(wxhw, false);
+err_free_mac_table:
+ kfree(adapter->mac_table);
err_pci_release_regions:
pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
@@ -142,9 +597,17 @@ err_pci_disable_dev:
**/
static void txgbe_remove(struct pci_dev *pdev)
{
+ struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ netdev = adapter->netdev;
+ unregister_netdev(netdev);
+
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(adapter->mac_table);
+
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index b2e329f50bae..740a1c447e20 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -4,15 +4,6 @@
#ifndef _TXGBE_TYPE_H_
#define _TXGBE_TYPE_H_
-#include <linux/types.h>
-#include <linux/netdevice.h>
-
-/************ txgbe_register.h ************/
-/* Vendor ID */
-#ifndef PCI_VENDOR_ID_WANGXUN
-#define PCI_VENDOR_ID_WANGXUN 0x8088
-#endif
-
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
#define TXGBE_DEV_ID_WX1820 0x2001
@@ -42,16 +33,42 @@
#define TXGBE_ID_WX1820_MAC_SGMII 0x2060
#define TXGBE_ID_MAC_SGMII 0x60
-#define TXGBE_NCSI_SUP 0x8000
-#define TXGBE_NCSI_MASK 0x8000
-#define TXGBE_WOL_SUP 0x4000
-#define TXGBE_WOL_MASK 0x4000
-#define TXGBE_DEV_MASK 0xf0
-
/* Combined interface */
#define TXGBE_ID_SFI_XAUI 0x50
/* Revision ID */
#define TXGBE_SP_MPW 1
+/**************** SP Registers ****************************/
+/* chip control Registers */
+#define TXGBE_MIS_PRB_CTL 0x10010
+#define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i))
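+/* lan id 0 maps to bit 1 and lan id 1 to bit 0 */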
+/* FMGR Registers */
+#define TXGBE_SPI_ILDR_STATUS 0x10120
+#define TXGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */
+#define TXGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */
+#define TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */
+
+/* Sensors for PVT (Process Voltage Temperature) */
+#define TXGBE_TS_CTL 0x10300
+#define TXGBE_TS_CTL_EVAL_MD BIT(31)
+
+/* Part Number String Length */
+#define TXGBE_PBANUM_LENGTH 32
+
+/* Checksum and EEPROM pointers */
+#define TXGBE_EEPROM_LAST_WORD 0x800
+#define TXGBE_EEPROM_CHECKSUM 0x2F
+#define TXGBE_EEPROM_SUM 0xBABA
+#define TXGBE_EEPROM_VERSION_L 0x1D
+#define TXGBE_EEPROM_VERSION_H 0x1E
+#define TXGBE_ISCSI_BOOT_CONFIG 0x07
+#define TXGBE_PBANUM0_PTR 0x05
+#define TXGBE_PBANUM1_PTR 0x06
+#define TXGBE_PBANUM_PTR_GUARD 0xFAFA
+
+struct txgbe_hw {
+ struct wx_hw wxhw;
+};
+
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 6370c447ac5c..575ff9de8985 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -611,8 +611,6 @@ static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
#endif /* CONFIG_64BIT */
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
-int axienet_mdio_enable(struct axienet_local *lp);
-void axienet_mdio_disable(struct axienet_local *lp);
int axienet_mdio_setup(struct axienet_local *lp);
void axienet_mdio_teardown(struct axienet_local *lp);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d1d772580da9..3e310b55bce2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1305,16 +1305,16 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
netdev_stats_to_stats64(stats, &dev->stats);
do {
- start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
+ start = u64_stats_fetch_begin(&lp->rx_stat_sync);
stats->rx_packets = u64_stats_read(&lp->rx_packets);
stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
- } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
+ } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
do {
- start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
+ start = u64_stats_fetch_begin(&lp->tx_stat_sync);
stats->tx_packets = u64_stats_read(&lp->tx_packets);
stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
+ } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}
static const struct net_device_ops axienet_netdev_ops = {
@@ -1736,7 +1736,6 @@ static void axienet_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops axienet_phylink_ops = {
- .validate = phylink_generic_validate,
.mac_select_pcs = axienet_mac_select_pcs,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
@@ -2217,12 +2216,48 @@ static void axienet_shutdown(struct platform_device *pdev)
rtnl_unlock();
}
+static int axienet_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+
+ rtnl_lock();
+ axienet_stop(ndev);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int axienet_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ rtnl_lock();
+ axienet_open(ndev);
+ rtnl_unlock();
+
+ netif_device_attach(ndev);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
+ axienet_suspend, axienet_resume);
+
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
.remove = axienet_remove,
.shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
+ .pm = &axienet_pm_ops,
.of_match_table = axienet_of_match,
},
};
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 0b3b6935c558..2f07fde361aa 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -17,7 +17,7 @@
#include "xilinx_axienet.h"
-#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
+#define DEFAULT_MDIO_FREQ 2500000 /* 2.5 MHz */
#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
/* Wait till MDIO interface is ready to accept a new transaction.*/
@@ -147,15 +147,20 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
/**
* axienet_mdio_enable - MDIO hardware setup function
* @lp: Pointer to axienet local data structure.
+ * @np: Pointer to mdio device tree node.
*
- * Return: 0 on success, -ETIMEDOUT on a timeout.
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -EOVERFLOW on a clock
+ * divisor overflow.
*
* Sets up the MDIO interface by initializing the MDIO clock and enabling the
* MDIO interface in hardware.
**/
-int axienet_mdio_enable(struct axienet_local *lp)
+static int axienet_mdio_enable(struct axienet_local *lp, struct device_node *np)
{
+ u32 mdio_freq = DEFAULT_MDIO_FREQ;
u32 host_clock;
+ u32 clk_div;
+ int ret;
lp->mii_clk_div = 0;
@@ -184,6 +189,12 @@ int axienet_mdio_enable(struct axienet_local *lp)
host_clock);
}
+ if (np)
+ of_property_read_u32(np, "clock-frequency", &mdio_freq);
+ if (mdio_freq != DEFAULT_MDIO_FREQ)
+ netdev_info(lp->ndev, "Setting non-standard mdio bus frequency to %u Hz\n",
+ mdio_freq);
+
/* clk_div can be calculated by deriving it from the equation:
* fMDIO = fHOST / ((1 + clk_div) * 2)
*
@@ -209,40 +220,42 @@ int axienet_mdio_enable(struct axienet_local *lp)
* "clock-frequency" from the CPU
*/
- lp->mii_clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+ clk_div = (host_clock / (mdio_freq * 2)) - 1;
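+ /* e.g. the default 150 MHz host clock and 2.5 MHz MDIO frequency give
+  * clk_div = 150000000 / (2500000 * 2) - 1 = 29
+  */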
/* If there is any remainder from the division of
- * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
- * 1 to the clock divisor or we will surely be above 2.5 MHz
+ * fHOST / (mdio_freq * 2), then we need to add
+ * 1 to the clock divisor or we will surely be
+ * above the requested frequency
*/
- if (host_clock % (MAX_MDIO_FREQ * 2))
- lp->mii_clk_div++;
+ if (host_clock % (mdio_freq * 2))
+ clk_div++;
+
+ /* Check for overflow of mii_clk_div */
+ if (clk_div & ~XAE_MDIO_MC_CLOCK_DIVIDE_MAX) {
+ netdev_warn(lp->ndev, "MDIO clock divisor overflow\n");
+ return -EOVERFLOW;
+ }
+ lp->mii_clk_div = (u8)clk_div;
netdev_dbg(lp->ndev,
"Setting MDIO clock divisor to %u/%u Hz host clock.\n",
lp->mii_clk_div, host_clock);
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK);
+ axienet_mdio_mdc_enable(lp);
- return axienet_mdio_wait_until_ready(lp);
-}
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret)
+ axienet_mdio_mdc_disable(lp);
-/**
- * axienet_mdio_disable - MDIO hardware disable function
- * @lp: Pointer to axienet local data structure.
- *
- * Disable the MDIO interface in hardware.
- **/
-void axienet_mdio_disable(struct axienet_local *lp)
-{
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, 0);
+ return ret;
}
/**
* axienet_mdio_setup - MDIO setup function
* @lp: Pointer to axienet local data structure.
*
- * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
- * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -EOVERFLOW on a clock
+ * divisor overflow, -ENOMEM when mdiobus_alloc (to allocate
+ * memory for mii bus structure) fails.
*
* Sets up the MDIO interface by initializing the MDIO clock.
* Register the MDIO interface.
@@ -253,10 +266,6 @@ int axienet_mdio_setup(struct axienet_local *lp)
struct mii_bus *bus;
int ret;
- ret = axienet_mdio_enable(lp);
- if (ret < 0)
- return ret;
-
bus = mdiobus_alloc();
if (!bus)
return -ENOMEM;
@@ -272,15 +281,23 @@ int axienet_mdio_setup(struct axienet_local *lp)
lp->mii_bus = bus;
mdio_node = of_get_child_by_name(lp->dev->of_node, "mdio");
+ ret = axienet_mdio_enable(lp, mdio_node);
+ if (ret < 0)
+ goto unregister;
ret = of_mdiobus_register(bus, mdio_node);
+ if (ret)
+ goto unregister_mdio_enabled;
of_node_put(mdio_node);
- if (ret) {
- mdiobus_free(bus);
- lp->mii_bus = NULL;
- return ret;
- }
axienet_mdio_mdc_disable(lp);
return 0;
+
+unregister_mdio_enabled:
+ axienet_mdio_mdc_disable(lp);
+unregister:
+ of_node_put(mdio_node);
+ mdiobus_free(bus);
+ lp->mii_bus = NULL;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 9abbdb71e629..94203eb46e6b 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -120,24 +120,13 @@ static irqreturn_t isr(int irq, void *priv)
* PTP clock operations
*/
-static int ptp_ixp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int ptp_ixp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- u64 adj;
- u32 diff, addend;
- int neg_adj = 0;
+ u32 addend;
struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
struct ixp46x_ts_regs *regs = ixp_clock->regs;
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- addend = DEFAULT_ADDEND;
- adj = addend;
- adj *= ppb;
- diff = div_u64(adj, 1000000000ULL);
-
- addend = neg_adj ? addend - diff : addend + diff;
+ addend = adjust_by_scaled_ppm(DEFAULT_ADDEND, scaled_ppm);
__raw_writel(addend, &regs->addend);
@@ -230,7 +219,7 @@ static const struct ptp_clock_info ptp_ixp_caps = {
.n_ext_ts = N_EXT_TS,
.n_pins = 0,
.pps = 0,
- .adjfreq = ptp_ixp_adjfreq,
+ .adjfine = ptp_ixp_adjfine,
.adjtime = ptp_ixp_adjtime,
.gettime64 = ptp_ixp_gettime,
.settime64 = ptp_ixp_settime,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index f393e454f45c..89ff7f8e8c7e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1907,7 +1907,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
if (err)
goto err;
- err = rtnl_configure_link(dev, NULL);
+ err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto err;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 791b4a53d69f..bd3b0c2655a2 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work)
* ===================== network driver interface =========================
*/
-static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 89eb4f179a3c..f9b219e6cd58 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1264,12 +1264,12 @@ static void netvsc_get_vf_stats(struct net_device *net,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
rx_packets = stats->rx_packets;
tx_packets = stats->tx_packets;
rx_bytes = stats->rx_bytes;
tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1294,12 +1294,12 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
this_tot->vf_rx_packets = stats->rx_packets;
this_tot->vf_tx_packets = stats->tx_packets;
this_tot->vf_rx_bytes = stats->rx_bytes;
this_tot->vf_tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
this_tot->rx_packets = this_tot->vf_rx_packets;
this_tot->tx_packets = this_tot->vf_tx_packets;
this_tot->rx_bytes = this_tot->vf_rx_bytes;
@@ -1318,20 +1318,20 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;
@@ -1370,21 +1370,21 @@ static void netvsc_get_stats64(struct net_device *net,
tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
multicast = rx_stats->multicast + rx_stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
@@ -1527,24 +1527,24 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
tx_stats = &nvdev->chan_table[j].tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
xdp_xmit = tx_stats->xdp_xmit;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_xmit;
rx_stats = &nvdev->chan_table[j].rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
xdp_drop = rx_stats->xdp_drop;
xdp_redirect = rx_stats->xdp_redirect;
xdp_tx = rx_stats->xdp_tx;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
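
Editor's note: the netvsc hunks above convert the per-channel statistics readers from u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() to the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() helpers; the retry loop itself is unchanged. A minimal sketch of the reader-side pattern, using a made-up per-CPU counter structure (names are illustrative, not from the driver):

#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counters protected by a u64_stats_sync. */
struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_read_stats(const struct demo_stats *s,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/* Retry the snapshot if a writer touched syncp meanwhile. */
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}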
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 2c338783893d..95a4a3cdc8a4 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -191,7 +191,7 @@ static void atusb_work_urbs(struct work_struct *work)
/* ----- Asynchronous USB -------------------------------------------------- */
-static void atusb_tx_done(struct atusb *atusb, u8 seq)
+static void atusb_tx_done(struct atusb *atusb, u8 seq, int reason)
{
struct usb_device *usb_dev = atusb->usb_dev;
u8 expect = atusb->tx_ack_seq;
@@ -199,7 +199,10 @@ static void atusb_tx_done(struct atusb *atusb, u8 seq)
dev_dbg(&usb_dev->dev, "%s (0x%02x/0x%02x)\n", __func__, seq, expect);
if (seq == expect) {
/* TODO check for ifs handling in firmware */
- ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+ if (reason == IEEE802154_SUCCESS)
+ ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+ else
+ ieee802154_xmit_error(atusb->hw, atusb->tx_skb, reason);
} else {
/* TODO I experience this case when atusb has a tx complete
* irq before probing, we should fix the firmware it's an
@@ -215,7 +218,8 @@ static void atusb_in_good(struct urb *urb)
struct usb_device *usb_dev = urb->dev;
struct sk_buff *skb = urb->context;
struct atusb *atusb = SKB_ATUSB(skb);
- u8 len, lqi;
+ int result = IEEE802154_SUCCESS;
+ u8 len, lqi, trac;
if (!urb->actual_length) {
dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n");
@@ -224,8 +228,27 @@ static void atusb_in_good(struct urb *urb)
len = *skb->data;
- if (urb->actual_length == 1) {
- atusb_tx_done(atusb, len);
+ switch (urb->actual_length) {
+ case 2:
+ trac = TRAC_MASK(*(skb->data + 1));
+ switch (trac) {
+ case TRAC_SUCCESS:
+ case TRAC_SUCCESS_DATA_PENDING:
+ /* already IEEE802154_SUCCESS */
+ break;
+ case TRAC_CHANNEL_ACCESS_FAILURE:
+ result = IEEE802154_CHANNEL_ACCESS_FAILURE;
+ break;
+ case TRAC_NO_ACK:
+ result = IEEE802154_NO_ACK;
+ break;
+ default:
+ result = IEEE802154_SYSTEM_ERROR;
+ }
+
+ fallthrough;
+ case 1:
+ atusb_tx_done(atusb, len, result);
return;
}
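
Editor's note: the atusb change forwards the transceiver's TRAC status to the 802.15.4 stack instead of unconditionally reporting success. A hedged sketch of just the mapping step; the TRAC_* constants are assumed to come from the driver's transceiver register header, and the helper name is made up:

#include <linux/ieee802154.h>

/* Illustrative helper: translate an AT86RF2xx-style TRAC status into
 * the ieee802154_status value handed to ieee802154_xmit_error().
 */
static int demo_trac_to_status(u8 trac)
{
	switch (trac) {
	case TRAC_SUCCESS:
	case TRAC_SUCCESS_DATA_PENDING:
		return IEEE802154_SUCCESS;
	case TRAC_CHANNEL_ACCESS_FAILURE:
		return IEEE802154_CHANNEL_ACCESS_FAILURE;
	case TRAC_NO_ACK:
		return IEEE802154_NO_ACK;
	default:
		return IEEE802154_SYSTEM_ERROR;
	}
}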
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 2f0544dd7c2a..8445c2189d11 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>
#include <net/genetlink.h>
@@ -47,6 +48,8 @@ static const struct genl_multicast_group hwsim_mcgrps[] = {
struct hwsim_pib {
u8 page;
u8 channel;
+ struct ieee802154_hw_addr_filt filt;
+ enum ieee802154_filtering_level filt_level;
struct rcu_head rcu;
};
@@ -88,24 +91,168 @@ static int hwsim_hw_ed(struct ieee802154_hw *hw, u8 *level)
return 0;
}
-static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+static int hwsim_update_pib(struct ieee802154_hw *hw, u8 page, u8 channel,
+ struct ieee802154_hw_addr_filt *filt,
+ enum ieee802154_filtering_level filt_level)
{
struct hwsim_phy *phy = hw->priv;
struct hwsim_pib *pib, *pib_old;
- pib = kzalloc(sizeof(*pib), GFP_KERNEL);
+ pib = kzalloc(sizeof(*pib), GFP_ATOMIC);
if (!pib)
return -ENOMEM;
+ pib_old = rtnl_dereference(phy->pib);
+
pib->page = page;
pib->channel = channel;
+ pib->filt.short_addr = filt->short_addr;
+ pib->filt.pan_id = filt->pan_id;
+ pib->filt.ieee_addr = filt->ieee_addr;
+ pib->filt.pan_coord = filt->pan_coord;
+ pib->filt_level = filt_level;
- pib_old = rtnl_dereference(phy->pib);
rcu_assign_pointer(phy->pib, pib);
kfree_rcu(pib_old, rcu);
return 0;
}
+static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct hwsim_phy *phy = hw->priv;
+ struct hwsim_pib *pib;
+ int ret;
+
+ rcu_read_lock();
+ pib = rcu_dereference(phy->pib);
+ ret = hwsim_update_pib(hw, page, channel, &pib->filt, pib->filt_level);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int hwsim_hw_addr_filt(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed)
+{
+ struct hwsim_phy *phy = hw->priv;
+ struct hwsim_pib *pib;
+ int ret;
+
+ rcu_read_lock();
+ pib = rcu_dereference(phy->pib);
+ ret = hwsim_update_pib(hw, pib->page, pib->channel, filt, pib->filt_level);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void hwsim_hw_receive(struct ieee802154_hw *hw, struct sk_buff *skb,
+ u8 lqi)
+{
+ struct ieee802154_hdr hdr;
+ struct hwsim_phy *phy = hw->priv;
+ struct hwsim_pib *pib;
+
+ rcu_read_lock();
+ pib = rcu_dereference(phy->pib);
+
+ if (!pskb_may_pull(skb, 3)) {
+ dev_dbg(hw->parent, "invalid frame\n");
+ goto drop;
+ }
+
+ memcpy(&hdr, skb->data, 3);
+
+ /* Level 4 filtering: Frame fields validity */
+ if (pib->filt_level == IEEE802154_FILTERING_4_FRAME_FIELDS) {
+ /* a) Drop reserved frame types */
+ switch (mac_cb(skb)->type) {
+ case IEEE802154_FC_TYPE_BEACON:
+ case IEEE802154_FC_TYPE_DATA:
+ case IEEE802154_FC_TYPE_ACK:
+ case IEEE802154_FC_TYPE_MAC_CMD:
+ break;
+ default:
+ dev_dbg(hw->parent, "unrecognized frame type 0x%x\n",
+ mac_cb(skb)->type);
+ goto drop;
+ }
+
+ /* b) Drop reserved frame versions */
+ switch (hdr.fc.version) {
+ case IEEE802154_2003_STD:
+ case IEEE802154_2006_STD:
+ case IEEE802154_STD:
+ break;
+ default:
+ dev_dbg(hw->parent,
+ "unrecognized frame version 0x%x\n",
+ hdr.fc.version);
+ goto drop;
+ }
+
+ /* c) PAN ID constraints */
+ if ((mac_cb(skb)->dest.mode == IEEE802154_ADDR_LONG ||
+ mac_cb(skb)->dest.mode == IEEE802154_ADDR_SHORT) &&
+ mac_cb(skb)->dest.pan_id != pib->filt.pan_id &&
+ mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
+ dev_dbg(hw->parent,
+ "unrecognized PAN ID %04x\n",
+ le16_to_cpu(mac_cb(skb)->dest.pan_id));
+ goto drop;
+ }
+
+ /* d1) Short address constraints */
+ if (mac_cb(skb)->dest.mode == IEEE802154_ADDR_SHORT &&
+ mac_cb(skb)->dest.short_addr != pib->filt.short_addr &&
+ mac_cb(skb)->dest.short_addr != cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
+ dev_dbg(hw->parent,
+ "unrecognized short address %04x\n",
+ le16_to_cpu(mac_cb(skb)->dest.short_addr));
+ goto drop;
+ }
+
+ /* d2) Extended address constraints */
+ if (mac_cb(skb)->dest.mode == IEEE802154_ADDR_LONG &&
+ mac_cb(skb)->dest.extended_addr != pib->filt.ieee_addr) {
+ dev_dbg(hw->parent,
+ "unrecognized long address 0x%016llx\n",
+ mac_cb(skb)->dest.extended_addr);
+ goto drop;
+ }
+
+ /* d4) Specific PAN coordinator case (no parent) */
+ if ((mac_cb(skb)->type == IEEE802154_FC_TYPE_DATA ||
+ mac_cb(skb)->type == IEEE802154_FC_TYPE_MAC_CMD) &&
+ mac_cb(skb)->dest.mode == IEEE802154_ADDR_NONE) {
+ dev_dbg(hw->parent,
+ "relaying is not supported\n");
+ goto drop;
+ }
+
+ /* e) Beacon frames follow specific PAN ID rules */
+ if (mac_cb(skb)->type == IEEE802154_FC_TYPE_BEACON &&
+ pib->filt.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST) &&
+ mac_cb(skb)->dest.pan_id != pib->filt.pan_id) {
+ dev_dbg(hw->parent,
+ "invalid beacon PAN ID %04x\n",
+ le16_to_cpu(mac_cb(skb)->dest.pan_id));
+ goto drop;
+ }
+ }
+
+ rcu_read_unlock();
+
+ ieee802154_rx_irqsafe(hw, skb, lqi);
+
+ return;
+
+drop:
+ rcu_read_unlock();
+ kfree_skb(skb);
+}
+
static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
struct hwsim_phy *current_phy = hw->priv;
@@ -133,8 +280,7 @@ static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
einfo = rcu_dereference(e->info);
if (newskb)
- ieee802154_rx_irqsafe(e->endpoint->hw, newskb,
- einfo->lqi);
+ hwsim_hw_receive(e->endpoint->hw, newskb, einfo->lqi);
}
}
rcu_read_unlock();
@@ -148,6 +294,7 @@ static int hwsim_hw_start(struct ieee802154_hw *hw)
struct hwsim_phy *phy = hw->priv;
phy->suspended = false;
+
return 0;
}
@@ -161,7 +308,22 @@ static void hwsim_hw_stop(struct ieee802154_hw *hw)
static int
hwsim_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
- return 0;
+ enum ieee802154_filtering_level filt_level;
+ struct hwsim_phy *phy = hw->priv;
+ struct hwsim_pib *pib;
+ int ret;
+
+ if (on)
+ filt_level = IEEE802154_FILTERING_NONE;
+ else
+ filt_level = IEEE802154_FILTERING_4_FRAME_FIELDS;
+
+ rcu_read_lock();
+ pib = rcu_dereference(phy->pib);
+ ret = hwsim_update_pib(hw, pib->page, pib->channel, &pib->filt, filt_level);
+ rcu_read_unlock();
+
+ return ret;
}
static const struct ieee802154_ops hwsim_ops = {
@@ -172,6 +334,7 @@ static const struct ieee802154_ops hwsim_ops = {
.start = hwsim_hw_start,
.stop = hwsim_hw_stop,
.set_promiscuous_mode = hwsim_set_promiscuous_mode,
+ .set_hw_addr_filt = hwsim_hw_addr_filt,
};
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -788,11 +951,13 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
}
pib->channel = 13;
+ pib->filt.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+ pib->filt.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
rcu_assign_pointer(phy->pib, pib);
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
- hw->flags = IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_RX_DROP_BAD_CKSUM;
+ hw->flags = IEEE802154_HW_PROMISCUOUS;
hw->parent = dev;
err = ieee802154_register_hw(hw);
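
Editor's note: the hwsim changes funnel every PIB update through hwsim_update_pib(), which follows the usual RCU publish pattern: take the current entry, allocate and fill a replacement, publish it with rcu_assign_pointer() and free the old copy after a grace period. A self-contained sketch of that pattern under the assumption that updaters are already serialised (structure and field names are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_pib {
	u8 page;
	u8 channel;
	struct rcu_head rcu;
};

struct demo_phy {
	struct demo_pib __rcu *pib;
};

static int demo_update_pib(struct demo_phy *phy, u8 page, u8 channel)
{
	struct demo_pib *new, *old;

	/* GFP_ATOMIC because the callers may run in atomic context. */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->page = page;
	new->channel = channel;

	/* Updaters are assumed serialised, hence the constant "1" check. */
	old = rcu_dereference_protected(phy->pib, 1);
	rcu_assign_pointer(phy->pib, new);
	kfree_rcu(old, rcu);	/* freed once all RCU readers are done */

	return 0;
}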
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 2fe0e4a0a0c4..f53d185e0568 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -1233,12 +1233,9 @@ mcr20a_probe(struct spi_device *spi)
}
rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
- if (IS_ERR(rst_b)) {
- ret = PTR_ERR(rst_b);
- if (ret != -EPROBE_DEFER)
- dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d", ret);
- return ret;
- }
+ if (IS_ERR(rst_b))
+ return dev_err_probe(&spi->dev, PTR_ERR(rst_b),
+ "Failed to get 'rst_b' gpio");
/* reset mcr20a */
usleep_range(10, 20);
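
Editor's note: the mcr20a hunk replaces the open-coded -EPROBE_DEFER special case with dev_err_probe(), which logs real errors, stays quiet on deferral (recording the reason in debugfs instead), and returns the error it was given. A small sketch of the idiom in a probe path; the device, supply name and function are made up:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int demo_probe(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		/* dev_err() for real failures; -EPROBE_DEFER only shows up
		 * in /sys/kernel/debug/devices_deferred.
		 */
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to get reset GPIO\n");

	return 0;
}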
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 1c64d5347b8e..78253ad57b2e 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -162,18 +162,18 @@ static void ifb_stats64(struct net_device *dev,
for (i = 0; i < dev->num_tx_queues; i++,txp++) {
do {
- start = u64_stats_fetch_begin_irq(&txp->rx_stats.sync);
+ start = u64_stats_fetch_begin(&txp->rx_stats.sync);
packets = txp->rx_stats.packets;
bytes = txp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&txp->rx_stats.sync, start));
+ } while (u64_stats_fetch_retry(&txp->rx_stats.sync, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
do {
- start = u64_stats_fetch_begin_irq(&txp->tx_stats.sync);
+ start = u64_stats_fetch_begin(&txp->tx_stats.sync);
packets = txp->tx_stats.packets;
bytes = txp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&txp->tx_stats.sync, start));
+ } while (u64_stats_fetch_retry(&txp->tx_stats.sync, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
}
@@ -245,12 +245,12 @@ static void ifb_fill_stats_data(u64 **data,
int j;
do {
- start = u64_stats_fetch_begin_irq(&q_stats->sync);
+ start = u64_stats_fetch_begin(&q_stats->sync);
for (j = 0; j < IFB_Q_STATS_LEN; j++) {
offset = ifb_q_stats_desc[j].offset;
(*data)[j] = *(u64 *)(stats_base + offset);
}
- } while (u64_stats_fetch_retry_irq(&q_stats->sync, start));
+ } while (u64_stats_fetch_retry(&q_stats->sync, start));
*data += IFB_Q_STATS_LEN;
}
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index e0d71f609272..3380fb3483b2 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -525,13 +525,14 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
- .version = IPA_VERSION_3_1,
- .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_3_1,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 42f2c88a92d4..4287114b24db 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -406,17 +406,18 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
- .version = IPA_VERSION_3_5_1,
- .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
- BIT(BCR_TX_NOT_USING_BRESP) |
- BIT(BCR_SUSPEND_L2_IRQ) |
- BIT(BCR_HOLB_DROP_L2_IRQ) |
- BIT(BCR_DUAL_TX),
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_3_5_1,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
+ BIT(BCR_TX_NOT_USING_BRESP) |
+ BIT(BCR_SUSPEND_L2_IRQ) |
+ BIT(BCR_HOLB_DROP_L2_IRQ) |
+ BIT(BCR_DUAL_TX),
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index a204e439c23d..1b4b52501ee3 100644
--- a/drivers/net/ipa/data/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -394,12 +394,13 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.11 */
const struct ipa_data ipa_data_v4_11 = {
- .version = IPA_VERSION_4_11,
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_4_11,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 04f574fe006f..199ed0ed868b 100644
--- a/drivers/net/ipa/data/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -372,13 +372,14 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.2 */
const struct ipa_data ipa_data_v4_2 = {
- .version = IPA_VERSION_4_2,
+ .version = IPA_VERSION_4_2,
/* backward_compat value is 0 */
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 684239e71f46..19b549f2998b 100644
--- a/drivers/net/ipa/data/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -450,12 +450,13 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.5 */
const struct ipa_data ipa_data_v4_5 = {
- .version = IPA_VERSION_4_5,
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_4_5,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 2333e15f9533..d30fc1fe6ca2 100644
--- a/drivers/net/ipa/data/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -444,12 +444,13 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.9. */
const struct ipa_data ipa_data_v4_9 = {
- .version = IPA_VERSION_4_9,
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_4_9,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 26b7f683a3e1..0f52c068c46d 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -87,6 +87,7 @@ struct gsi_tre {
int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
u32 max_alloc)
{
+ size_t alloc_size;
void *virt;
if (!size)
@@ -103,13 +104,15 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
* If there aren't enough entries starting at the free index,
* we just allocate free entries from the beginning of the pool.
*/
- virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
+ alloc_size = size_mul(count + max_alloc - 1, size);
+ alloc_size = kmalloc_size_roundup(alloc_size);
+ virt = kzalloc(alloc_size, GFP_KERNEL);
if (!virt)
return -ENOMEM;
pool->base = virt;
/* If the allocator gave us any extra memory, use it */
- pool->count = ksize(pool->base) / size;
+ pool->count = alloc_size / size;
pool->free = 0;
pool->max_alloc = max_alloc;
pool->size = size;
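
Editor's note: the gsi_trans pool change stops deriving the usable element count from ksize() after allocation; it instead computes an overflow-safe size with size_mul(), rounds it up front with kmalloc_size_roundup() to the bucket kmalloc() would return anyway, and sizes the pool from that. A short sketch of the same pattern, assuming the rounded-up slack may be used for extra elements (names are illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_pool {
	void *base;
	u32 count;	/* number of usable elements */
	size_t size;	/* element size */
};

static int demo_pool_init(struct demo_pool *pool, size_t size, u32 count)
{
	size_t alloc_size;

	/* size_mul() saturates on overflow instead of wrapping. */
	alloc_size = size_mul(count, size);
	/* Round up to the size kmalloc() would actually hand back. */
	alloc_size = kmalloc_size_roundup(alloc_size);

	pool->base = kzalloc(alloc_size, GFP_KERNEL);
	if (!pool->base)
		return -ENOMEM;

	pool->count = alloc_size / size;
	pool->size = size;

	return 0;
}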
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 09ead433ec38..5372db58b5bd 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -39,6 +39,9 @@ struct ipa_interrupt;
* @power: IPA power information
* @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content
+ * @route_count: Total number of entries in a routing table
+ * @modem_route_count: Number of modem entries in a routing table
+ * @filter_count: Maximum number of entries in a filter table
* @interrupt: IPA Interrupt information
* @uc_powered: true if power is active by proxy for microcontroller
* @uc_loaded: true after microcontroller has reported it's ready
@@ -58,11 +61,13 @@ struct ipa_interrupt;
* @zero_addr: DMA address of preallocated zero-filled memory
* @zero_virt: Virtual address of preallocated zero-filled memory
* @zero_size: Size (bytes) of preallocated zero-filled memory
- * @available: Bit mask indicating endpoints hardware supports
- * @filter_map: Bit mask indicating endpoints that support filtering
- * @initialized: Bit mask indicating endpoints initialized
- * @set_up: Bit mask indicating endpoints set up
- * @enabled: Bit mask indicating endpoints enabled
+ * @endpoint_count: Number of defined bits in most bitmaps below
+ * @available_count: Number of defined bits in the available bitmap
+ * @defined: Bitmap of endpoints defined in config data
+ * @available: Bitmap of endpoints supported by hardware
+ * @filtered: Bitmap of endpoints that support filtering
+ * @set_up: Bitmap of endpoints that are set up for use
+ * @enabled: Bitmap of currently enabled endpoints
* @modem_tx_count: Number of defined modem TX endpoints
* @endpoint: Array of endpoint information
* @channel_map: Mapping of GSI channel to IPA endpoint
@@ -84,6 +89,9 @@ struct ipa {
dma_addr_t table_addr;
__le64 *table_virt;
+ u32 route_count;
+ u32 modem_route_count;
+ u32 filter_count;
struct ipa_interrupt *interrupt;
bool uc_powered;
@@ -110,12 +118,14 @@ struct ipa {
void *zero_virt;
size_t zero_size;
- /* Bit masks indicating endpoint state */
- u32 available; /* supported by hardware */
- u32 filter_map;
- u32 initialized;
- u32 set_up;
- u32 enabled;
+ /* Bitmaps indicating endpoint state */
+ u32 endpoint_count;
+ u32 available_count;
+ unsigned long *defined; /* Defined in configuration data */
+ unsigned long *available; /* Supported by hardware */
+ u64 filtered; /* Support filtering (AP and modem) */
+ unsigned long *set_up;
+ unsigned long *enabled;
u32 modem_tx_count;
struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 26c3db9f52b1..bb3dfa9a2bc8 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -145,20 +145,12 @@ union ipa_cmd_payload {
static void ipa_cmd_validate_build(void)
{
- /* The sizes of a filter and route tables need to fit into fields
- * in the ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
+ /* The size of a filter table needs to fit into fields in the
+ * ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
* might not be used, non-hashed and hashed tables have the same
* maximum size. IPv4 and IPv6 filter tables have the same number
- * of entries, as and IPv4 and IPv6 route tables have the same number
* of entries.
*/
-#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64))
-#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
- BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
- BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
-#undef TABLE_COUNT_MAX
-#undef TABLE_SIZE
-
/* Hashed and non-hashed fields are assumed to be the same size */
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
@@ -171,18 +163,22 @@ static void ipa_cmd_validate_build(void)
}
/* Validate a memory region holding a table */
-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
+bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route)
{
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
struct device *dev = &ipa->pdev->dev;
+ u32 size;
+
+ size = route ? ipa->route_count : ipa->filter_count + 1;
+ size *= sizeof(__le64);
/* Size must fit in the immediate command field that holds it */
- if (mem->size > size_max) {
+ if (size > size_max) {
dev_err(dev, "%s table region size too large\n", table);
- dev_err(dev, " (0x%04x > 0x%04x)\n",
- mem->size, size_max);
+ dev_err(dev, " (0x%04x > 0x%04x)\n", size, size_max);
return false;
}
@@ -197,21 +193,11 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
return false;
}
- /* Entire memory range must fit within IPA-local memory */
- if (mem->offset > ipa->mem_size ||
- mem->size > ipa->mem_size - mem->offset) {
- dev_err(dev, "%s table region out of range\n", table);
- dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- mem->offset, mem->size, ipa->mem_size);
-
- return false;
- }
-
return true;
}
/* Validate the memory region that holds headers */
-static bool ipa_cmd_header_valid(struct ipa *ipa)
+static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_mem *mem;
@@ -257,15 +243,6 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
return false;
}
- /* Make sure the entire combined area fits in IPA memory */
- if (size > ipa->mem_size || offset > ipa->mem_size - size) {
- dev_err(dev, "header table region out of range\n");
- dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- offset, size, ipa->mem_size);
-
- return false;
- }
-
return true;
}
@@ -336,26 +313,11 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
return true;
}
-bool ipa_cmd_data_valid(struct ipa *ipa)
-{
- if (!ipa_cmd_header_valid(ipa))
- return false;
-
- if (!ipa_cmd_register_write_valid(ipa))
- return false;
-
- return true;
-}
-
-
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- /* This is as good a place as any to validate build constants */
- ipa_cmd_validate_build();
-
/* Command payloads are allocated one at a time, but a single
* transaction can require up to the maximum supported by the
* channel; treat them as if they were allocated all at once.
@@ -655,3 +617,17 @@ struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
tre_count, DMA_NONE);
}
+
+/* Init function for immediate commands; there is no ipa_cmd_exit() */
+int ipa_cmd_init(struct ipa *ipa)
+{
+ ipa_cmd_validate_build();
+
+ if (!ipa_cmd_header_init_local_valid(ipa))
+ return -EINVAL;
+
+ if (!ipa_cmd_register_write_valid(ipa))
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 8e4243c1f0bb..e2cf1c2b0ef2 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -47,15 +47,15 @@ enum ipa_cmd_opcode {
};
/**
- * ipa_cmd_table_valid() - Validate a memory region holding a table
+ * ipa_cmd_table_init_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
* @route: - Whether the region holds a route or filter table
*
* Return: true if region is valid, false otherwise
*/
-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
- bool route);
+bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route);
/**
* ipa_cmd_data_valid() - Validate command-related configuration is valid
@@ -162,4 +162,14 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
*/
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
+/**
+ * ipa_cmd_init() - Initialize IPA immediate commands
+ * @ipa: - IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * There is no need for a matching ipa_cmd_exit() function.
+ */
+int ipa_cmd_init(struct ipa *ipa);
+
#endif /* _IPA_CMD_H_ */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index e5a6ce75c7dd..412edbfac786 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -222,6 +222,7 @@ struct ipa_power_data {
* @backward_compat: BCR register value (prior to IPA v4.5 only)
* @qsb_count: number of entries in the qsb_data array
* @qsb_data: Qualcomm System Bus configuration data
+ * @modem_route_count: number of modem entries in a routing table
* @endpoint_count: number of entries in the endpoint_data array
* @endpoint_data: IPA endpoint/GSI channel data
* @resource_data: IPA resource configuration data
@@ -233,6 +234,7 @@ struct ipa_data {
u32 backward_compat;
u32 qsb_count; /* number of entries in qsb_data[] */
const struct ipa_qsb_data *qsb_data;
+ u32 modem_route_count;
u32 endpoint_count; /* number of entries in endpoint_data[] */
const struct ipa_gsi_endpoint_data *endpoint_data;
const struct ipa_resource_data *resource_data;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 093e11ec7c2d..136932464261 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -243,42 +243,47 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
-static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
- const struct ipa_gsi_endpoint_data *data)
+/* Validate endpoint configuration data. Return max defined endpoint ID */
+static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
+ u32 max;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT);
- return false;
+ return 0;
}
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
- return false;
+ return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
dev_err(dev, "LAN RX endpoint not defined\n");
- return false;
+ return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
dev_err(dev, "AP->modem TX endpoint not defined\n");
- return false;
+ return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
dev_err(dev, "AP<-modem RX endpoint not defined\n");
- return false;
+ return 0;
}
- for (name = 0; name < count; name++, dp++)
+ max = 0;
+ for (name = 0; name < count; name++, dp++) {
if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
- return false;
+ return 0;
+ max = max_t(u32, max, dp->endpoint_id);
+ }
- return true;
+ return max;
}
/* Allocate a transaction to use on a non-command endpoint */
@@ -345,29 +350,32 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
- u32 mask = BIT(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ u32 unit = endpoint_id / 32;
const struct ipa_reg *reg;
u32 val;
- WARN_ON(!(mask & ipa->available));
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
- return !!(val & mask);
+ return !!(val & BIT(endpoint_id % 32));
}
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
- u32 mask = BIT(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa;
+ u32 unit = endpoint_id / 32;
const struct ipa_reg *reg;
- WARN_ON(!(mask & ipa->available));
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
- iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
}
/**
@@ -426,10 +434,10 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
*/
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
- u32 endpoint_id;
+ u32 endpoint_id = 0;
- for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
- struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
+ while (endpoint_id < ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
@@ -448,8 +456,8 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
- u32 initialized = ipa->initialized;
struct gsi_trans *trans;
+ u32 endpoint_id;
u32 count;
/* We need one command per modem TX endpoint, plus the commands
@@ -463,14 +471,11 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
return -EBUSY;
}
- while (initialized) {
- u32 endpoint_id = __ffs(initialized);
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
const struct ipa_reg *reg;
u32 offset;
- initialized ^= BIT(endpoint_id);
-
/* We only reset modem TX endpoints */
endpoint = &ipa->endpoint[endpoint_id];
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
@@ -1008,10 +1013,10 @@ static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
- u32 i;
+ u32 endpoint_id = 0;
- for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
- struct ipa_endpoint *endpoint = &ipa->endpoint[i];
+ while (endpoint_id < ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
@@ -1661,6 +1666,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
int ret;
@@ -1670,37 +1676,35 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
dev_err(&ipa->pdev->dev,
"error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R',
- endpoint->channel_id, endpoint->endpoint_id);
+ endpoint->channel_id, endpoint_id);
return ret;
}
if (!endpoint->toward_ipa) {
- ipa_interrupt_suspend_enable(ipa->interrupt,
- endpoint->endpoint_id);
+ ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
ipa_endpoint_replenish_enable(endpoint);
}
- ipa->enabled |= BIT(endpoint->endpoint_id);
+ __set_bit(endpoint_id, ipa->enabled);
return 0;
}
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
- u32 mask = BIT(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
int ret;
- if (!(ipa->enabled & mask))
+ if (!test_bit(endpoint_id, ipa->enabled))
return;
- ipa->enabled ^= mask;
+ __clear_bit(endpoint_id, endpoint->ipa->enabled);
if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
- ipa_interrupt_suspend_disable(ipa->interrupt,
- endpoint->endpoint_id);
+ ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
}
/* Note that if stop fails, the channel's state is not well-defined */
@@ -1708,7 +1712,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
if (ret)
dev_err(&ipa->pdev->dev,
"error %d attempting to stop endpoint %u\n", ret,
- endpoint->endpoint_id);
+ endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
@@ -1717,7 +1721,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
- if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return;
if (!endpoint->toward_ipa) {
@@ -1737,7 +1741,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
- if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return;
if (!endpoint->toward_ipa)
@@ -1797,12 +1801,12 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
ipa_endpoint_program(endpoint);
- endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
+ __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
}
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
- endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
+ __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
if (!endpoint->toward_ipa)
cancel_delayed_work_sync(&endpoint->replenish_work);
@@ -1812,45 +1816,39 @@ static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_setup(struct ipa *ipa)
{
- u32 initialized = ipa->initialized;
-
- ipa->set_up = 0;
- while (initialized) {
- u32 endpoint_id = __ffs(initialized);
-
- initialized ^= BIT(endpoint_id);
+ u32 endpoint_id;
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
- }
}
void ipa_endpoint_teardown(struct ipa *ipa)
{
- u32 set_up = ipa->set_up;
-
- while (set_up) {
- u32 endpoint_id = __fls(set_up);
-
- set_up ^= BIT(endpoint_id);
+ u32 endpoint_id;
+ for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
- }
- ipa->set_up = 0;
+}
+
+void ipa_endpoint_deconfig(struct ipa *ipa)
+{
+ ipa->available_count = 0;
+ bitmap_free(ipa->available);
+ ipa->available = NULL;
}
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_reg *reg;
- u32 initialized;
+ u32 endpoint_id;
+ u32 tx_count;
+ u32 rx_count;
u32 rx_base;
- u32 rx_mask;
- u32 tx_mask;
- int ret = 0;
- u32 max;
+ u32 limit;
u32 val;
- /* Prior to IPAv3.5, the FLAVOR_0 register was not supported.
+ /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
* Furthermore, the endpoints were not grouped such that TX
* endpoint numbers started with 0 and RX endpoints had numbers
* higher than all TX endpoints, so we can't do the simple
@@ -1861,61 +1859,78 @@ int ipa_endpoint_config(struct ipa *ipa)
* assume the configuration is valid.
*/
if (ipa->version < IPA_VERSION_3_5) {
- ipa->available = ~0;
+ ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
+ if (!ipa->available)
+ return -ENOMEM;
+ ipa->available_count = IPA_ENDPOINT_MAX;
+
+ bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
+
return 0;
}
/* Find out about the endpoints supplied by the hardware, and ensure
- * the highest one doesn't exceed the number we support.
+ * the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
- /* Our RX is an IPA producer */
+ /* Our RX is an IPA producer; our TX is an IPA consumer. */
+ tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
+ rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
- max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
- if (max > IPA_ENDPOINT_MAX) {
- dev_err(dev, "too many endpoints (%u > %u)\n",
- max, IPA_ENDPOINT_MAX);
+
+ limit = rx_base + rx_count;
+ if (limit > IPA_ENDPOINT_MAX) {
+ dev_err(dev, "too many endpoints, %u > %u\n",
+ limit, IPA_ENDPOINT_MAX);
return -EINVAL;
}
- rx_mask = GENMASK(max - 1, rx_base);
- /* Our TX is an IPA consumer */
- max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
- tx_mask = GENMASK(max - 1, 0);
-
- ipa->available = rx_mask | tx_mask;
+ /* Allocate and initialize the available endpoint bitmap */
+ ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
+ if (!ipa->available)
+ return -ENOMEM;
+ ipa->available_count = limit;
- /* Check for initialized endpoints not supported by the hardware */
- if (ipa->initialized & ~ipa->available) {
- dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
- ipa->initialized & ~ipa->available);
- ret = -EINVAL; /* Report other errors too */
- }
+ /* Mark all supported RX and TX endpoints as available */
+ bitmap_set(ipa->available, 0, tx_count);
+ bitmap_set(ipa->available, rx_base, rx_count);
- initialized = ipa->initialized;
- while (initialized) {
- u32 endpoint_id = __ffs(initialized);
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
- initialized ^= BIT(endpoint_id);
+ if (endpoint_id >= limit) {
+ dev_err(dev, "invalid endpoint id, %u > %u\n",
+ endpoint_id, limit - 1);
+ goto err_free_bitmap;
+ }
+
+ if (!test_bit(endpoint_id, ipa->available)) {
+ dev_err(dev, "unavailable endpoint id %u\n",
+ endpoint_id);
+ goto err_free_bitmap;
+ }
/* Make sure it's pointing in the right direction */
endpoint = &ipa->endpoint[endpoint_id];
- if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
- dev_err(dev, "endpoint id %u wrong direction\n",
- endpoint_id);
- ret = -EINVAL;
+ if (endpoint->toward_ipa) {
+ if (endpoint_id < tx_count)
+ continue;
+ } else if (endpoint_id >= rx_base) {
+ continue;
}
+
+ dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
+ goto err_free_bitmap;
}
- return ret;
-}
+ return 0;
-void ipa_endpoint_deconfig(struct ipa *ipa)
-{
- ipa->available = 0; /* Nothing more to do */
+err_free_bitmap:
+ ipa_endpoint_deconfig(ipa);
+
+ return -EINVAL;
}
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
@@ -1936,46 +1951,64 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->toward_ipa = data->toward_ipa;
endpoint->config = data->endpoint.config;
- ipa->initialized |= BIT(endpoint->endpoint_id);
+ __set_bit(endpoint->endpoint_id, ipa->defined);
}
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
- endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
+ __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
- u32 initialized = ipa->initialized;
-
- while (initialized) {
- u32 endpoint_id = __fls(initialized);
+ u32 endpoint_id;
- initialized ^= BIT(endpoint_id);
+ ipa->filtered = 0;
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
- }
+
+ bitmap_free(ipa->enabled);
+ ipa->enabled = NULL;
+ bitmap_free(ipa->set_up);
+ ipa->set_up = NULL;
+ bitmap_free(ipa->defined);
+ ipa->defined = NULL;
+
memset(ipa->name_map, 0, sizeof(ipa->name_map));
memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
/* Returns a bitmask of endpoints that support filtering, or 0 on error */
-u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
enum ipa_endpoint_name name;
- u32 filter_map;
+ u32 filtered;
BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
- if (!ipa_endpoint_data_valid(ipa, count, data))
- return 0; /* Error */
+ /* Number of endpoints is one more than the maximum ID */
+ ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
+ if (!ipa->endpoint_count)
+ return -EINVAL;
+
+ /* Initialize endpoint state bitmaps */
+ ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->defined)
+ return -ENOMEM;
+
+ ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->set_up)
+ goto err_free_defined;
- ipa->initialized = 0;
+ ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->enabled)
+ goto err_free_set_up;
- filter_map = 0;
+ filtered = 0;
for (name = 0; name < count; name++, data++) {
if (ipa_gsi_endpoint_data_empty(data))
continue; /* Skip over empty slots */
@@ -1983,18 +2016,28 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
ipa_endpoint_init_one(ipa, name, data);
if (data->endpoint.filter_support)
- filter_map |= BIT(data->endpoint_id);
+ filtered |= BIT(data->endpoint_id);
if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
ipa->modem_tx_count++;
}
- if (!ipa_filter_map_valid(ipa, filter_map))
- goto err_endpoint_exit;
+ /* Make sure the set of filtered endpoints is valid */
+ if (!ipa_filtered_valid(ipa, filtered)) {
+ ipa_endpoint_exit(ipa);
- return filter_map; /* Non-zero bitmask */
+ return -EINVAL;
+ }
-err_endpoint_exit:
- ipa_endpoint_exit(ipa);
+ ipa->filtered = filtered;
- return 0; /* Error */
+ return 0;
+
+err_free_set_up:
+ bitmap_free(ipa->set_up);
+ ipa->set_up = NULL;
+err_free_defined:
+ bitmap_free(ipa->defined);
+ ipa->defined = NULL;
+
+ return -ENOMEM;
}
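
Editor's note: the ipa_endpoint conversion above replaces fixed-width u32 endpoint masks with dynamically sized bitmaps so more than 32 endpoints can be tracked. A compact sketch of the bitmap API surface the patch relies on, with made-up state names; allocation, test/set and iteration mirror the converted code:

#include <linux/bitmap.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int demo_bitmap_usage(unsigned int nbits)
{
	unsigned long *enabled;
	unsigned int id;

	/* One bit per endpoint, all initially clear. */
	enabled = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!enabled)
		return -ENOMEM;

	__set_bit(3, enabled);			/* mark endpoint 3 enabled */

	if (test_bit(3, enabled))
		__clear_bit(3, enabled);

	/* Walk every set bit; replaces the old __ffs()/^= BIT() loops. */
	for_each_set_bit(id, enabled, nbits)
		pr_debug("endpoint %u is enabled\n", id);

	bitmap_free(enabled);

	return 0;
}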
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index d8dfa24f5214..4a5c3bc549df 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -195,7 +195,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa);
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
void ipa_endpoint_default_route_clear(struct ipa *ipa);
-u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data);
void ipa_endpoint_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c269432f9c2e..a49f66efacb8 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -132,24 +132,28 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 endpoint_id, bool enable)
{
struct ipa *ipa = interrupt->ipa;
- u32 mask = BIT(endpoint_id);
+ u32 unit = endpoint_id / 32;
const struct ipa_reg *reg;
u32 offset;
+ u32 mask;
u32 val;
- WARN_ON(!(mask & ipa->available));
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
if (ipa->version == IPA_VERSION_3_0)
return;
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
- offset = ipa_reg_offset(reg);
+ offset = ipa_reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
+
+ mask = BIT(endpoint_id);
if (enable)
val |= mask;
else
val &= ~mask;
+
iowrite32(val, ipa->reg_virt + offset);
}
@@ -171,18 +175,24 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
- u32 val;
+ u32 unit_count;
+ u32 unit;
- reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ unit_count = roundup(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct ipa_reg *reg;
+ u32 val;
- /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
- if (ipa->version == IPA_VERSION_3_0)
- return;
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
- reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (ipa->version == IPA_VERSION_3_0)
+ continue;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ }
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
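
Editor's note: the ipa_interrupt and STATE_AGGR_ACTIVE/AGGR_FORCE_CLOSE hunks all switch to addressing per-endpoint bits through a register "unit" (endpoint_id / 32) plus a bit within it (endpoint_id % 32), since one 32-bit register can no longer cover every endpoint. A generic sketch of that banking calculation; the base/stride layout here is invented for illustration and does not describe the IPA register map:

#include <linux/bits.h>
#include <linux/io.h>

static bool demo_endpoint_bit_is_set(void __iomem *base, u32 stride,
				     u32 endpoint_id)
{
	u32 unit = endpoint_id / 32;	/* which 32-bit register */
	u32 val = ioread32(base + unit * stride);

	return !!(val & BIT(endpoint_id % 32));
}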
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 49537fccf6ad..8f20825675a1 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -82,6 +82,23 @@
#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
/**
+ * enum ipa_firmware_loader: How GSI firmware gets loaded
+ *
+ * @IPA_LOADER_DEFER: System not ready; try again later
+ * @IPA_LOADER_SELF: AP loads GSI firmware
+ * @IPA_LOADER_MODEM: Modem loads GSI firmware, signals when done
+ * @IPA_LOADER_SKIP: Neither AP nor modem need to load GSI firmware
+ * @IPA_LOADER_INVALID: GSI firmware loader specification is invalid
+ */
+enum ipa_firmware_loader {
+ IPA_LOADER_DEFER,
+ IPA_LOADER_SELF,
+ IPA_LOADER_MODEM,
+ IPA_LOADER_SKIP,
+ IPA_LOADER_INVALID,
+};
+
+/**
* ipa_setup() - Set up IPA hardware
* @ipa: IPA pointer
*
@@ -696,6 +713,50 @@ static void ipa_validate_build(void)
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
}
+static enum ipa_firmware_loader ipa_firmware_loader(struct device *dev)
+{
+ bool modem_init;
+ const char *str;
+ int ret;
+
+ /* Look up the old and new properties by name */
+ modem_init = of_property_read_bool(dev->of_node, "modem-init");
+ ret = of_property_read_string(dev->of_node, "qcom,gsi-loader", &str);
+
+ /* If the new property doesn't exist, it's legacy behavior */
+ if (ret == -EINVAL) {
+ if (modem_init)
+ return IPA_LOADER_MODEM;
+ goto out_self;
+ }
+
+ /* Any other error on the new property means it's poorly defined */
+ if (ret)
+ return IPA_LOADER_INVALID;
+
+ /* New property value exists; if old one does too, that's invalid */
+ if (modem_init)
+ return IPA_LOADER_INVALID;
+
+ /* Modem loads GSI firmware for "modem" */
+ if (!strcmp(str, "modem"))
+ return IPA_LOADER_MODEM;
+
+ /* No GSI firmware load is needed for "skip" */
+ if (!strcmp(str, "skip"))
+ return IPA_LOADER_SKIP;
+
+ /* Any value other than "self" is an error */
+ if (strcmp(str, "self"))
+ return IPA_LOADER_INVALID;
+out_self:
+ /* We need Trust Zone to load firmware; make sure it's available */
+ if (qcom_scm_is_available())
+ return IPA_LOADER_SELF;
+
+ return IPA_LOADER_DEFER;
+}
+
/**
* ipa_probe() - IPA platform driver probe function
* @pdev: Platform device pointer
@@ -722,9 +783,9 @@ static void ipa_validate_build(void)
static int ipa_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ enum ipa_firmware_loader loader;
const struct ipa_data *data;
struct ipa_power *power;
- bool modem_init;
struct ipa *ipa;
int ret;
@@ -742,11 +803,16 @@ static int ipa_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* If we need Trust Zone, make sure it's available */
- modem_init = of_property_read_bool(dev->of_node, "modem-init");
- if (!modem_init)
- if (!qcom_scm_is_available())
- return -EPROBE_DEFER;
+ if (!data->modem_route_count) {
+ dev_err(dev, "modem_route_count cannot be zero\n");
+ return -EINVAL;
+ }
+
+ loader = ipa_firmware_loader(dev);
+ if (loader == IPA_LOADER_INVALID)
+ return -EINVAL;
+ if (loader == IPA_LOADER_DEFER)
+ return -EPROBE_DEFER;
/* The clock and interconnects might not be ready when we're
* probed, so might return -EPROBE_DEFER.
@@ -766,6 +832,7 @@ static int ipa_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ipa);
ipa->power = power;
ipa->version = data->version;
+ ipa->modem_route_count = data->modem_route_count;
init_completion(&ipa->completion);
ret = ipa_reg_init(ipa);
@@ -782,18 +849,15 @@ static int ipa_probe(struct platform_device *pdev)
goto err_mem_exit;
/* Result is a non-zero mask of endpoints that support filtering */
- ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
- data->endpoint_data);
- if (!ipa->filter_map) {
- ret = -EINVAL;
+ ret = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data);
+ if (ret)
goto err_gsi_exit;
- }
ret = ipa_table_init(ipa);
if (ret)
goto err_endpoint_exit;
- ret = ipa_smp2p_init(ipa, modem_init);
+ ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
if (ret)
goto err_table_exit;
@@ -808,20 +872,20 @@ static int ipa_probe(struct platform_device *pdev)
dev_info(dev, "IPA driver initialized");
- /* If the modem is doing early initialization, it will trigger a
- * call to ipa_setup() when it has finished. In that case we're
- * done here.
+ /* If the modem is loading GSI firmware, it will trigger a call to
+ * ipa_setup() when it has finished. In that case we're done here.
*/
- if (modem_init)
+ if (loader == IPA_LOADER_MODEM)
goto done;
- /* Otherwise we need to load the firmware and have Trust Zone validate
- * and install it. If that succeeds we can proceed with setup.
- */
- ret = ipa_firmware_load(dev);
- if (ret)
- goto err_deconfig;
+ if (loader == IPA_LOADER_SELF) {
+ /* The AP is loading GSI firmware; do so now */
+ ret = ipa_firmware_load(dev);
+ if (ret)
+ goto err_deconfig;
+ } /* Otherwise loader == IPA_LOADER_SKIP */
+ /* GSI firmware is loaded; proceed to setup */
ret = ipa_setup(ipa);
if (ret)
goto err_deconfig;
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index f84c6830495a..9ec5af323f73 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -198,9 +198,12 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
case IPA_MEM_PDN_CONFIG:
case IPA_MEM_STATS_QUOTA_MODEM:
- case IPA_MEM_STATS_TETHERING:
return ipa->version >= IPA_VERSION_4_0;
+ case IPA_MEM_STATS_TETHERING:
+ return ipa->version >= IPA_VERSION_4_0 &&
+ ipa->version != IPA_VERSION_5_0;
+
default:
return false; /* Anything else is optional */
}
@@ -366,14 +369,6 @@ int ipa_mem_config(struct ipa *ipa)
while (--canary_count);
}
- /* Make sure filter and route table memory regions are valid */
- if (!ipa_table_valid(ipa))
- goto err_dma_free;
-
- /* Validate memory-related properties relevant to immediate commands */
- if (!ipa_cmd_data_valid(ipa))
- goto err_dma_free;
-
/* Verify the microcontroller ring alignment (if defined) */
mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
if (mem && mem->offset % 1024) {
@@ -625,6 +620,12 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_count = mem_data->local_count;
ipa->mem = mem_data->local;
+ /* Check the route and filter table memory regions */
+ if (!ipa_table_mem_valid(ipa, false))
+ return -EINVAL;
+ if (!ipa_table_mem_valid(ipa, true))
+ return -EINVAL;
+
ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 8295fd4b70d1..f70f0a1d1cda 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -284,6 +284,7 @@ static const struct ipa_init_modem_driver_req *
init_modem_driver_req(struct ipa_qmi *ipa_qmi)
{
struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ u32 modem_route_count = ipa->modem_route_count;
static struct ipa_init_modem_driver_req req;
const struct ipa_mem *mem;
@@ -308,12 +309,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v4_route_tbl_info.end = modem_route_count - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v6_route_tbl_info.end = modem_route_count - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +353,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v4_hash_route_tbl_info.end = modem_route_count - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +361,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v6_hash_route_tbl_info.end = modem_route_count - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 97c0befe8d86..894f99517233 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -9,7 +9,7 @@
#include "ipa_qmi_msg.h"
/* QMI message structure definition for struct ipa_indication_register_req */
-struct qmi_elem_info ipa_indication_register_req_ei[] = {
+const struct qmi_elem_info ipa_indication_register_req_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -116,7 +116,7 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = {
};
/* QMI message structure definition for struct ipa_indication_register_rsp */
-struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
+const struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -134,7 +134,7 @@ struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
};
/* QMI message structure definition for struct ipa_driver_init_complete_req */
-struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
+const struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -151,7 +151,7 @@ struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
};
/* QMI message structure definition for struct ipa_driver_init_complete_rsp */
-struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
+const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -169,7 +169,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
};
/* QMI message structure definition for struct ipa_init_complete_ind */
-struct qmi_elem_info ipa_init_complete_ind_ei[] = {
+const struct qmi_elem_info ipa_init_complete_ind_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -187,7 +187,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = {
};
/* QMI message structure definition for struct ipa_mem_bounds */
-struct qmi_elem_info ipa_mem_bounds_ei[] = {
+const struct qmi_elem_info ipa_mem_bounds_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -208,7 +208,7 @@ struct qmi_elem_info ipa_mem_bounds_ei[] = {
};
/* QMI message structure definition for struct ipa_mem_array */
-struct qmi_elem_info ipa_mem_array_ei[] = {
+const struct qmi_elem_info ipa_mem_array_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -229,7 +229,7 @@ struct qmi_elem_info ipa_mem_array_ei[] = {
};
/* QMI message structure definition for struct ipa_mem_range */
-struct qmi_elem_info ipa_mem_range_ei[] = {
+const struct qmi_elem_info ipa_mem_range_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -250,7 +250,7 @@ struct qmi_elem_info ipa_mem_range_ei[] = {
};
/* QMI message structure definition for struct ipa_init_modem_driver_req */
-struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+const struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -645,7 +645,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
};
/* QMI message structure definition for struct ipa_init_modem_driver_rsp */
-struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
+const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
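Constifying the element-info arrays lets them live in read-only data; the QMI core only ever reads them. A minimal sketch of the resulting declaration shape, using a hypothetical message and assuming the usual QMI_EOTI terminator entry:

    const struct qmi_elem_info example_req_ei[] = {
        {
            .data_type = QMI_UNSIGNED_1_BYTE,
            .elem_len  = 1,
        },
        {
            .data_type = QMI_EOTI,  /* end-of-type-info terminator */
        },
    };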
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index e29663965f43..b73503552c4d 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -247,15 +247,15 @@ struct ipa_init_modem_driver_rsp {
};
/* Message structure definitions defined in "ipa_qmi_msg.c" */
-extern struct qmi_elem_info ipa_indication_register_req_ei[];
-extern struct qmi_elem_info ipa_indication_register_rsp_ei[];
-extern struct qmi_elem_info ipa_driver_init_complete_req_ei[];
-extern struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
-extern struct qmi_elem_info ipa_init_complete_ind_ei[];
-extern struct qmi_elem_info ipa_mem_bounds_ei[];
-extern struct qmi_elem_info ipa_mem_array_ei[];
-extern struct qmi_elem_info ipa_mem_range_ei[];
-extern struct qmi_elem_info ipa_init_modem_driver_req_ei[];
-extern struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
+extern const struct qmi_elem_info ipa_indication_register_req_ei[];
+extern const struct qmi_elem_info ipa_indication_register_rsp_ei[];
+extern const struct qmi_elem_info ipa_driver_init_complete_req_ei[];
+extern const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
+extern const struct qmi_elem_info ipa_init_complete_ind_ei[];
+extern const struct qmi_elem_info ipa_mem_bounds_ei[];
+extern const struct qmi_elem_info ipa_mem_array_ei[];
+extern const struct qmi_elem_info ipa_mem_range_ei[];
+extern const struct qmi_elem_info ipa_init_modem_driver_req_ei[];
+extern const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
#endif /* !_IPA_QMI_MSG_H_ */
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index 5cbc15a971f9..14bd2f903045 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -46,7 +46,7 @@ version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_version_string(ipa));
+ return sysfs_emit(buf, "%s\n", ipa_version_string(ipa));
}
static DEVICE_ATTR_RO(version);
@@ -70,7 +70,7 @@ static ssize_t rx_offload_show(struct device *dev,
{
struct ipa *ipa = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+ return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
}
static DEVICE_ATTR_RO(rx_offload);
@@ -80,7 +80,7 @@ static ssize_t tx_offload_show(struct device *dev,
{
struct ipa *ipa = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+ return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
}
static DEVICE_ATTR_RO(tx_offload);
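sysfs_emit() already knows a show() buffer is a full page and guards against overruns, so the explicit PAGE_SIZE argument to scnprintf() is no longer needed. A minimal sketch of the preferred pattern, with a hypothetical attribute:

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        /* Emit a single value plus newline into the page-sized buffer */
        return sysfs_emit(buf, "%s\n", "example");
    }
    static DEVICE_ATTR_RO(example);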
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 510ff2dc8999..b81e27b61354 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -32,8 +32,8 @@
* endian 64-bit "slot" that holds the address of a rule definition. (The
* size of these slots is 64 bits regardless of the host DMA address size.)
*
- * Separate tables (both filter and route) used for IPv4 and IPv6. There
- * are normally another set of "hashed" filter and route tables, which are
+ * Separate tables (both filter and route) are used for IPv4 and IPv6. There
+ * is normally another set of "hashed" filter and route tables, which are
* used with a hash of message metadata. Hashed operation is not supported
* by all IPA hardware (IPA v4.2 doesn't support hashed tables).
*
@@ -51,19 +51,32 @@
* Each filter rule is associated with an AP or modem TX endpoint, though
* not all TX endpoints support filtering. The first 64-bit slot in a
* filter table is a bitmap indicating which endpoints have entries in
- * the table. The low-order bit (bit 0) in this bitmap represents a
- * special global filter, which applies to all traffic. This is not
- * used in the current code. Bit 1, if set, indicates that there is an
- * entry (i.e. slot containing a system address referring to a rule) for
- * endpoint 0 in the table. Bit 3, if set, indicates there is an entry
- * for endpoint 2, and so on. Space is set aside in IPA local memory to
- * hold as many filter table entries as might be required, but typically
- * they are not all used.
+ * the table. Each set bit in this bitmap indicates the presence of the
+ * address of a filter rule in the memory following the bitmap. Until IPA
+ * v5.0, the low-order bit (bit 0) in this bitmap represents a special
+ * global filter, which applies to all traffic. Otherwise the position of
+ * each set bit represents an endpoint for which a filter rule is defined.
+ *
+ * The global rule is not used in the current code, and support for it is
+ * removed starting at IPA v5.0. For IPA v5.0+, the endpoint bitmap
+ * position defines the endpoint ID--i.e. if bit 1 is set in the endpoint
+ * bitmap, endpoint 1 has a filter rule. Older versions of IPA represent
+ * the presence of a filter rule for endpoint X by bit (X + 1) being set.
+ * I.e., bit 1 set indicates the presence of a filter rule for endpoint 0,
+ * and bit 3 set means there is a filter rule present for endpoint 2.
+ *
+ * Each filter table entry has the address of a set of equations that
+ * implement a filter rule. So following the endpoint bitmap there
+ * will be such an address/entry for each endpoint with a set bit in
+ * the bitmap.
*
* The AP initializes all entries in a filter table to refer to a "zero"
- * entry. Once initialized the modem and AP update the entries for
- * endpoints they "own" directly. Currently the AP does not use the
- * IPA filtering functionality.
+ * rule. Once initialized, the modem and AP update the entries for
+ * endpoints they "own" directly. Currently the AP does not use the IPA
+ * filtering functionality.
+ *
+ * This diagram shows an example of a filter table with an endpoint
+ * bitmap as defined prior to IPA v5.0.
*
* IPA Filter Table
* ----------------------
@@ -106,12 +119,6 @@
* ----------------------
*/
-/* Assignment of route table entries to the modem and AP */
-#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
-#define IPA_ROUTE_AP_COUNT \
- (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
-
/* Filter or route rules consist of a set of 32-bit values followed by a
* 32-bit all-zero rule list terminator. The "zero rule" is simply an
* all-zero rule followed by the list terminator.
@@ -135,85 +142,40 @@ static void ipa_table_validate_build(void)
* assumes that it can be written using a pointer to __le64.
*/
BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
-
- /* Impose a practical limit on the number of routes */
- BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
- /* The modem must be allotted at least one route table entry */
- BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
- /* But it can't have more than what is available */
- BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
-
-}
-
-static bool
-ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
-{
- const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
- struct device *dev = &ipa->pdev->dev;
- u32 size;
-
- if (route)
- size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
- else
- size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
-
- if (!ipa_cmd_table_valid(ipa, mem, route))
- return false;
-
- /* mem->size >= size is sufficient, but we'll demand more */
- if (mem->size == size)
- return true;
-
- /* Hashed table regions can be zero size if hashing is not supported */
- if (ipa_table_hash_support(ipa) && !mem->size)
- return true;
-
- dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
- route ? "route" : "filter", mem_id, mem->size, size);
-
- return false;
}
-/* Verify the filter and route table memory regions are the expected size */
-bool ipa_table_valid(struct ipa *ipa)
+static const struct ipa_mem *
+ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
{
- bool valid;
-
- valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
-
- if (!ipa_table_hash_support(ipa))
- return valid;
-
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
- false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
- false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
- true);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
- true);
-
- return valid;
+ enum ipa_mem_id mem_id;
+
+ mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED
+ : IPA_MEM_V4_FILTER_HASHED
+ : ipv6 ? IPA_MEM_V6_FILTER
+ : IPA_MEM_V4_FILTER
+ : hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED
+ : IPA_MEM_V4_ROUTE_HASHED
+ : ipv6 ? IPA_MEM_V6_ROUTE
+ : IPA_MEM_V4_ROUTE;
+
+ return ipa_mem_find(ipa, mem_id);
}
-bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
+bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
struct device *dev = &ipa->pdev->dev;
u32 count;
- if (!filter_map) {
+ if (!filtered) {
dev_err(dev, "at least one filtering endpoint is required\n");
return false;
}
- count = hweight32(filter_map);
- if (count > IPA_FILTER_COUNT_MAX) {
- dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
- count, IPA_FILTER_COUNT_MAX);
+ count = hweight64(filtered);
+ if (count > ipa->filter_count) {
+ dev_err(dev, "too many filtering endpoints (%u > %u)\n",
+ count, ipa->filter_count);
return false;
}
@@ -229,7 +191,7 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
if (!count)
return 0;
- WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX));
+ WARN_ON(count > max_t(u32, ipa->filter_count, ipa->route_count));
/* Skip over the zero rule and possibly the filter mask */
skip = filter_mask ? 1 : 2;
@@ -238,16 +200,17 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
}
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
- u16 first, u16 count, enum ipa_mem_id mem_id)
+ bool hashed, bool ipv6, u16 first, u16 count)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
+ const struct ipa_mem *mem;
dma_addr_t addr;
u32 offset;
u16 size;
- /* Nothing to do if the table memory region is empty */
- if (!mem->size)
+ /* Nothing to do if the memory region doesn't exist or is empty */
+ mem = ipa_table_mem(ipa, filter, hashed, ipv6);
+ if (!mem || !mem->size)
return;
if (filter)
@@ -265,14 +228,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
* for the IPv4 and IPv6 non-hashed and hashed filter tables.
*/
static int
-ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
+ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
{
- u32 ep_mask = ipa->filter_map;
- u32 count = hweight32(ep_mask);
+ u64 ep_mask = ipa->filtered;
struct gsi_trans *trans;
enum gsi_ee_id ee_id;
- trans = ipa_cmd_trans_alloc(ipa, count);
+ trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction for %s filter reset\n",
@@ -291,7 +253,7 @@ ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
if (endpoint->ee_id != ee_id)
continue;
- ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
+ ipa_table_reset_add(trans, true, hashed, ipv6, endpoint_id, 1);
}
gsi_trans_commit_wait(trans);
@@ -307,18 +269,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
int ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem);
+ ret = ipa_filter_reset_table(ipa, false, false, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
+ ret = ipa_filter_reset_table(ipa, true, false, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
+ ret = ipa_filter_reset_table(ipa, false, true, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
+ ret = ipa_filter_reset_table(ipa, true, true, modem);
return ret;
}
@@ -329,6 +291,7 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
* */
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
+ u32 modem_route_count = ipa->modem_route_count;
struct gsi_trans *trans;
u16 first;
u16 count;
@@ -342,20 +305,18 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
}
if (modem) {
- first = IPA_ROUTE_MODEM_MIN;
- count = IPA_ROUTE_MODEM_COUNT;
+ first = 0;
+ count = modem_route_count;
} else {
- first = IPA_ROUTE_AP_MIN;
- count = IPA_ROUTE_AP_COUNT;
+ first = modem_route_count;
+ count = ipa->route_count - modem_route_count;
}
- ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
- ipa_table_reset_add(trans, false, first, count,
- IPA_MEM_V4_ROUTE_HASHED);
+ ipa_table_reset_add(trans, false, false, false, first, count);
+ ipa_table_reset_add(trans, false, true, false, first, count);
- ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
- ipa_table_reset_add(trans, false, first, count,
- IPA_MEM_V6_ROUTE_HASHED);
+ ipa_table_reset_add(trans, false, false, true, first, count);
+ ipa_table_reset_add(trans, false, true, true, first, count);
gsi_trans_commit_wait(trans);
@@ -413,16 +374,15 @@ int ipa_table_hash_flush(struct ipa *ipa)
return 0;
}
-static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
- enum ipa_cmd_opcode opcode,
- enum ipa_mem_id mem_id,
- enum ipa_mem_id hash_mem_id)
+static void ipa_table_init_add(struct gsi_trans *trans, bool filter, bool ipv6)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
- const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
+ const struct ipa_mem *hash_mem;
+ enum ipa_cmd_opcode opcode;
+ const struct ipa_mem *mem;
dma_addr_t hash_addr;
dma_addr_t addr;
+ u32 hash_offset;
u32 zero_offset;
u16 hash_count;
u32 zero_size;
@@ -430,6 +390,16 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
u16 count;
u16 size;
+ opcode = filter ? ipv6 ? IPA_CMD_IP_V6_FILTER_INIT
+ : IPA_CMD_IP_V4_FILTER_INIT
+ : ipv6 ? IPA_CMD_IP_V6_ROUTING_INIT
+ : IPA_CMD_IP_V4_ROUTING_INIT;
+
+ /* The non-hashed region will exist (see ipa_table_mem_valid()) */
+ mem = ipa_table_mem(ipa, filter, false, ipv6);
+ hash_mem = ipa_table_mem(ipa, filter, true, ipv6);
+ hash_offset = hash_mem ? hash_mem->offset : 0;
+
/* Compute the number of table entries to initialize */
if (filter) {
/* The number of filtering endpoints determines number of
@@ -437,14 +407,14 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
* to hold the bitmap itself. The size of the hashed filter
* table is either the same as the non-hashed one, or zero.
*/
- count = 1 + hweight32(ipa->filter_map);
- hash_count = hash_mem->size ? count : 0;
+ count = 1 + hweight64(ipa->filtered);
+ hash_count = hash_mem && hash_mem->size ? count : 0;
} else {
/* The size of a route table region determines the number
* of entries it has.
*/
count = mem->size / sizeof(__le64);
- hash_count = hash_mem->size / sizeof(__le64);
+ hash_count = hash_mem ? hash_mem->size / sizeof(__le64) : 0;
}
size = count * sizeof(__le64);
hash_size = hash_count * sizeof(__le64);
@@ -453,7 +423,7 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
hash_addr = ipa_table_addr(ipa, filter, hash_count);
ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
- hash_size, hash_mem->offset, hash_addr);
+ hash_size, hash_offset, hash_addr);
if (!filter)
return;
@@ -466,7 +436,7 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
return;
/* Zero the unused space in the hashed filter table */
- zero_offset = hash_mem->offset + hash_size;
+ zero_offset = hash_offset + hash_size;
zero_size = hash_mem->size - hash_size;
ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
ipa->zero_addr, true);
@@ -495,17 +465,10 @@ int ipa_table_setup(struct ipa *ipa)
return -EBUSY;
}
- ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
- IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);
-
- ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
- IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);
-
- ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
- IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);
-
- ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
- IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);
+ ipa_table_init_add(trans, false, false);
+ ipa_table_init_add(trans, false, true);
+ ipa_table_init_add(trans, true, false);
+ ipa_table_init_add(trans, true, true);
gsi_trans_commit_wait(trans);
@@ -542,7 +505,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
static void ipa_filter_config(struct ipa *ipa, bool modem)
{
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
- u32 ep_mask = ipa->filter_map;
+ u64 ep_mask = ipa->filtered;
if (!ipa_table_hash_support(ipa))
return;
@@ -559,10 +522,9 @@ static void ipa_filter_config(struct ipa *ipa, bool modem)
}
}
-static bool ipa_route_id_modem(u32 route_id)
+static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id)
{
- return route_id >= IPA_ROUTE_MODEM_MIN &&
- route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
+ return route_id < ipa->modem_route_count;
}
/**
@@ -597,8 +559,8 @@ static void ipa_route_config(struct ipa *ipa, bool modem)
if (!ipa_table_hash_support(ipa))
return;
- for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
- if (ipa_route_id_modem(route_id) == modem)
+ for (route_id = 0; route_id < ipa->route_count; route_id++)
+ if (ipa_route_id_modem(ipa, route_id) == modem)
ipa_route_tuple_zero(ipa, route_id);
}
@@ -611,14 +573,94 @@ void ipa_table_config(struct ipa *ipa)
ipa_route_config(ipa, true);
}
-/*
- * Initialize a coherent DMA allocation containing initialized filter and
+/* Verify the sizes of all IPA filter or routing table memory regions
+ * are valid. If valid, this records the size of the routing table.
+ */
+bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
+{
+ bool hash_support = ipa_table_hash_support(ipa);
+ const struct ipa_mem *mem_hashed;
+ const struct ipa_mem *mem_ipv4;
+ const struct ipa_mem *mem_ipv6;
+ u32 count;
+
+ /* IPv4 and IPv6 non-hashed tables are expected to be defined and
+ * have the same size. Both must have at least two entries (and
+ * would normally have more than that).
+ */
+ mem_ipv4 = ipa_table_mem(ipa, filter, false, false);
+ if (!mem_ipv4)
+ return false;
+
+ mem_ipv6 = ipa_table_mem(ipa, filter, false, true);
+ if (!mem_ipv6)
+ return false;
+
+ if (mem_ipv4->size != mem_ipv6->size)
+ return false;
+
+ /* Compute and record the number of entries for each table type */
+ count = mem_ipv4->size / sizeof(__le64);
+ if (count < 2)
+ return false;
+ if (filter)
+ ipa->filter_count = count - 1; /* Filter map in first entry */
+ else
+ ipa->route_count = count;
+
+ /* Table offset and size must fit in TABLE_INIT command fields */
+ if (!ipa_cmd_table_init_valid(ipa, mem_ipv4, !filter))
+ return false;
+
+ /* Make sure the regions are big enough */
+ if (filter) {
+ /* Filter tables must be able to hold the endpoint bitmap plus
+ * an entry for each endpoint that supports filtering
+ */
+ if (count < 1 + hweight64(ipa->filtered))
+ return false;
+ } else {
+ /* Routing tables must be able to hold all modem entries,
+ * plus at least one entry for the AP.
+ */
+ if (count < ipa->modem_route_count + 1)
+ return false;
+ }
+
+ /* If hashing is supported, hashed tables are expected to be defined,
+ * and have the same size as non-hashed tables. If hashing is not
+ * supported, hashed tables are expected to have zero size (or not
+ * be defined).
+ */
+ mem_hashed = ipa_table_mem(ipa, filter, true, false);
+ if (hash_support) {
+ if (!mem_hashed || mem_hashed->size != mem_ipv4->size)
+ return false;
+ } else {
+ if (mem_hashed && mem_hashed->size)
+ return false;
+ }
+
+ /* Same check for IPv6 tables */
+ mem_hashed = ipa_table_mem(ipa, filter, true, true);
+ if (hash_support) {
+ if (!mem_hashed || mem_hashed->size != mem_ipv6->size)
+ return false;
+ } else {
+ if (mem_hashed && mem_hashed->size)
+ return false;
+ }
+
+ return true;
+}
+
+/* Initialize a coherent DMA allocation containing initialized filter and
* route table data. This is used when initializing or resetting the IPA
* filter or route table.
*
* The first entry in a filter table contains a bitmap indicating which
* endpoints contain entries in the table. In addition to that first entry,
- * there are at most IPA_FILTER_COUNT_MAX entries that follow. Filter table
+ * there is a fixed maximum number of entries that follow. Filter table
* entries are 64 bits wide, and (other than the bitmap) contain the DMA
* address of a filter rule. A "zero rule" indicates no filtering, and
* consists of 64 bits of zeroes. When a filter table is initialized (or
@@ -629,12 +671,6 @@ void ipa_table_config(struct ipa *ipa)
* when a route table is initialized or reset, its entries are made to refer
* to the zero rule. The zero rule is shared for route and filter tables.
*
- * Note that the IPA hardware requires a filter or route rule address to be
- * aligned on a 128 byte boundary. The coherent DMA buffer we allocate here
- * has a minimum alignment, and we place the zero rule at the base of that
- * allocated space. In ipa_table_init() we verify the minimum DMA allocation
- * meets our requirement.
- *
* +-------------------+
* --> | zero rule |
* / |-------------------|
@@ -642,8 +678,8 @@ void ipa_table_config(struct ipa *ipa)
* |\ |-------------------|
* | ---- zero rule address | \
* |\ |-------------------| |
- * | ---- zero rule address | | IPA_FILTER_COUNT_MAX
- * | |-------------------| > or IPA_ROUTE_COUNT_MAX,
+ * | ---- zero rule address | | Max IPA filter count
+ * | |-------------------| > or IPA route count,
* | ... | whichever is greater
* \ |-------------------| |
* ---- zero rule address | /
@@ -651,15 +687,17 @@ void ipa_table_config(struct ipa *ipa)
*/
int ipa_table_init(struct ipa *ipa)
{
- u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
struct device *dev = &ipa->pdev->dev;
dma_addr_t addr;
__le64 le_addr;
__le64 *virt;
size_t size;
+ u32 count;
ipa_table_validate_build();
+ count = max_t(u32, ipa->filter_count, ipa->route_count);
+
/* The IPA hardware requires route and filter table rules to be
* aligned on a 128-byte boundary. We put the "zero rule" at the
* base of the table area allocated here. The DMA address returned
@@ -677,12 +715,16 @@ int ipa_table_init(struct ipa *ipa)
/* First slot is the zero rule */
*virt++ = 0;
- /* Next is the filter table bitmap. The "soft" bitmap value
- * must be converted to the hardware representation by shifting
- * it left one position. (Bit 0 repesents global filtering,
- * which is possible but not used.)
+ /* Next is the filter table bitmap. The "soft" bitmap value might
+ * need to be converted to the hardware representation by shifting
+ * it left one position. Prior to IPA v5.0, bit 0 represents global
+ * filtering, which is possible but not used. IPA v5.0+ eliminated
+ * that option, so there's no shifting required.
*/
- *virt++ = cpu_to_le64((u64)ipa->filter_map << 1);
+ if (ipa->version < IPA_VERSION_5_0)
+ *virt++ = cpu_to_le64(ipa->filtered << 1);
+ else
+ *virt++ = cpu_to_le64(ipa->filtered);
/* All the rest contain the DMA address of the zero rule */
le_addr = cpu_to_le64(addr);
@@ -694,7 +736,7 @@ int ipa_table_init(struct ipa *ipa)
void ipa_table_exit(struct ipa *ipa)
{
- u32 count = max_t(u32, 1 + IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
struct device *dev = &ipa->pdev->dev;
size_t size;
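The nested conditional in ipa_table_mem() selects one of eight memory IDs from the (filter, hashed, ipv6) triple. Purely as a reading aid (not code from the patch), the same mapping expressed as a lookup table:

    /* [filter][hashed][ipv6] -> memory region ID */
    static const enum ipa_mem_id table_mem_id[2][2][2] = {
        [0][0][0] = IPA_MEM_V4_ROUTE,
        [0][0][1] = IPA_MEM_V6_ROUTE,
        [0][1][0] = IPA_MEM_V4_ROUTE_HASHED,
        [0][1][1] = IPA_MEM_V6_ROUTE_HASHED,
        [1][0][0] = IPA_MEM_V4_FILTER,
        [1][0][1] = IPA_MEM_V6_FILTER,
        [1][1][0] = IPA_MEM_V4_FILTER_HASHED,
        [1][1][1] = IPA_MEM_V6_FILTER_HASHED,
    };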
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 395189f75d78..7cc951904bb4 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -10,31 +10,14 @@
struct ipa;
-/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
-#define IPA_FILTER_COUNT_MAX 14
-
-/* The number of route table entries allotted to the modem */
-#define IPA_ROUTE_MODEM_COUNT 8
-
-/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
-#define IPA_ROUTE_COUNT_MAX 15
-
/**
- * ipa_table_valid() - Validate route and filter table memory regions
+ * ipa_filtered_valid() - Validate a filter table endpoint bitmap
* @ipa: IPA pointer
+ * @filtered: Filter table endpoint bitmap to check
*
* Return: true if all regions are valid, false otherwise
*/
-bool ipa_table_valid(struct ipa *ipa);
-
-/**
- * ipa_filter_map_valid() - Validate a filter table endpoint bitmap
- * @ipa: IPA pointer
- * @filter_mask: Filter table endpoint bitmap to check
- *
- * Return: true if all regions are valid, false otherwise
- */
-bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
+bool ipa_filtered_valid(struct ipa *ipa, u64 filtered);
/**
* ipa_table_hash_support() - Return true if hashed tables are supported
@@ -86,4 +69,11 @@ int ipa_table_init(struct ipa *ipa);
*/
void ipa_table_exit(struct ipa *ipa);
+/**
+ * ipa_table_mem_valid() - Validate sizes of table memory regions
+ * @ipa: IPA pointer
+ * @filter: Whether to check filter or routing tables
+ */
+bool ipa_table_mem_valid(struct ipa *ipa, bool filter);
+
#endif /* _IPA_TABLE_H_ */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 7870e0cc3d7c..7889c310e943 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -19,6 +19,7 @@
* @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
+ * @IPA_VERSION_5_0: IPA version 5.0/GSI version 3.0
* @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
@@ -36,6 +37,7 @@ enum ipa_version {
IPA_VERSION_4_7,
IPA_VERSION_4_9,
IPA_VERSION_4_11,
+ IPA_VERSION_5_0,
IPA_VERSION_COUNT, /* Last; not a version */
};
@@ -48,6 +50,7 @@ static inline bool ipa_version_supported(enum ipa_version version)
case IPA_VERSION_4_5:
case IPA_VERSION_4_9:
case IPA_VERSION_4_11:
+ case IPA_VERSION_5_0:
return true;
default:
return false;
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
index 0d002c3c38a2..677ece3bce9e 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -103,7 +103,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
@@ -116,7 +116,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = {
[EOT_COAL_GRANULARITY] = GENMASK(3, 0),
@@ -386,13 +386,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
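IPA_REG_STRIDE marks registers that exist as an array of identical instances spaced a fixed number of bytes apart (0x4 here), which matters once a bitmap register must cover more than 32 endpoints. A minimal sketch of how such a stride would typically be consumed; the helper name is illustrative, not part of the patch:

    /* Offset of instance n of a strided register */
    static u32 example_strided_offset(u32 base, u32 stride, u32 n)
    {
        return base + n * stride;
    }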
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
index 6e2f939b18f1..b9c6a50de243 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -108,7 +108,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
@@ -121,7 +121,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
@@ -397,13 +397,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
index 8fd36569bb9f..9a315130530d 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.11.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -140,7 +140,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
@@ -151,7 +151,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
@@ -453,13 +453,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
index f8e78e1907c8..7a95149f8ec7 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.2.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -132,7 +132,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
@@ -399,13 +399,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
index d32b805abb11..587eb8d4e00f 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -134,7 +134,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
@@ -472,13 +472,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
index eabbc5451937..1f67a03fe599 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.9.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -139,7 +139,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
@@ -150,7 +150,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
@@ -450,13 +450,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 796a38f9d7b2..b15dd9a3ad54 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -301,13 +301,13 @@ static void ipvlan_get_stats64(struct net_device *dev,
for_each_possible_cpu(idx) {
pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
do {
- strt= u64_stats_fetch_begin_irq(&pcptr->syncp);
+ strt = u64_stats_fetch_begin(&pcptr->syncp);
rx_pkts = u64_stats_read(&pcptr->rx_pkts);
rx_bytes = u64_stats_read(&pcptr->rx_bytes);
rx_mcast = u64_stats_read(&pcptr->rx_mcast);
tx_pkts = u64_stats_read(&pcptr->tx_pkts);
tx_bytes = u64_stats_read(&pcptr->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&pcptr->syncp,
+ } while (u64_stats_fetch_retry(&pcptr->syncp,
strt));
s->rx_packets += rx_pkts;
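The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair is replaced by the plain variants throughout these drivers; the reader-side retry loop itself is unchanged. The canonical pattern, shown with a hypothetical per-CPU stats structure:

    unsigned int start;
    u64 packets, bytes;

    do {
        start = u64_stats_fetch_begin(&stats->syncp);
        packets = u64_stats_read(&stats->packets);
        bytes = u64_stats_read(&stats->bytes);
    } while (u64_stats_fetch_retry(&stats->syncp, start));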
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2e9742952c4e..f6d53e63ef4e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -106,10 +106,10 @@ void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
- start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
+ start = u64_stats_fetch_begin(&lb_stats->syncp);
tpackets = u64_stats_read(&lb_stats->packets);
tbytes = u64_stats_read(&lb_stats->bytes);
- } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
*bytes += tbytes;
*packets += tpackets;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2fbac51b9b19..937f5b1f04ff 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2793,9 +2793,9 @@ static void get_rx_sc_stats(struct net_device *dev,
stats = per_cpu_ptr(rx_sc->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
sum->InOctetsValidated += tmp.InOctetsValidated;
sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
@@ -2874,9 +2874,9 @@ static void get_tx_sc_stats(struct net_device *dev,
stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
sum->OutPktsProtected += tmp.OutPktsProtected;
sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
@@ -2930,9 +2930,9 @@ static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
sum->OutPktsUntagged += tmp.OutPktsUntagged;
sum->InPktsUntagged += tmp.InPktsUntagged;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b8cc55b2d721..99a971929c8e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -948,13 +948,13 @@ static void macvlan_dev_get_stats64(struct net_device *dev,
for_each_possible_cpu(i) {
p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
rx_packets = u64_stats_read(&p->rx_packets);
rx_bytes = u64_stats_read(&p->rx_bytes);
rx_multicast = u64_stats_read(&p->rx_multicast);
tx_packets = u64_stats_read(&p->tx_packets);
tx_bytes = u64_stats_read(&p->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index 0b9d37979133..3d322ac4f6a5 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -104,19 +104,19 @@ static void mhi_ndo_get_stats64(struct net_device *ndev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
+ start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
- } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
+ } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
+ start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
- } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
+ } while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
}
static const struct net_device_ops mhi_netdev_ops = {
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 68e56e451b2b..b962fc8e1397 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -994,9 +994,6 @@ static int nsim_dev_info_get(struct devlink *devlink,
{
int err;
- err = devlink_info_driver_name_put(req, DRV_NAME);
- if (err)
- return err;
err = devlink_info_version_stored_put_ext(req, "fw.mgmt", "10.20.30",
DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err)
@@ -1401,12 +1398,11 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
if (nsim_dev_port_is_vf(nsim_dev_port)) {
err = devl_rate_leaf_create(&nsim_dev_port->devlink_port,
- nsim_dev_port);
+ nsim_dev_port, NULL);
if (err)
goto err_nsim_destroy;
}
- devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
list_add(&nsim_dev_port->list, &nsim_dev->port_list);
return 0;
@@ -1429,7 +1425,6 @@ static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
list_del(&nsim_dev_port->list);
if (nsim_dev_port_is_vf(nsim_dev_port))
devl_rate_leaf_destroy(&nsim_dev_port->devlink_port);
- devlink_port_type_clear(devlink_port);
nsim_destroy(nsim_dev_port->ns);
nsim_dev_port_debugfs_exit(nsim_dev_port);
devl_port_unregister(devlink_port);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 9a1a5b203624..6db6a75ff9b9 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&ns->syncp);
+ start = u64_stats_fetch_begin(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
- } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
+ } while (u64_stats_fetch_retry(&ns->syncp, start));
}
static int
@@ -238,13 +238,6 @@ nsim_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-static struct devlink_port *nsim_get_devlink_port(struct net_device *dev)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- return &ns->nsim_dev_port->devlink_port;
-}
-
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
@@ -263,7 +256,6 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
- .ndo_get_devlink_port = nsim_get_devlink_port,
};
static const struct net_device_ops nsim_vf_netdev_ops = {
@@ -275,7 +267,6 @@ static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_get_stats64 = nsim_get_stats64,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
- .ndo_get_devlink_port = nsim_get_devlink_port,
};
static void nsim_setup(struct net_device *dev)
@@ -360,6 +351,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->nsim_dev_port = nsim_dev_port;
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
+ SET_NETDEV_DEVLINK_PORT(dev, &nsim_dev_port->devlink_port);
nsim_ethtool_init(ns);
if (nsim_dev_port_is_pf(nsim_dev_port))
err = nsim_init_netdevsim(ns);
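Rather than reporting the devlink port through a .ndo_get_devlink_port callback, the netdev is now associated with its port once, at creation time. A minimal sketch of the new pattern as used above:

    /* Bind the netdev to its devlink port before registration */
    SET_NETDEV_DEVLINK_PORT(dev, &nsim_dev_port->devlink_port);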
diff --git a/drivers/net/pcs/pcs-altera-tse.c b/drivers/net/pcs/pcs-altera-tse.c
index 97a7cabff962..d616749761f4 100644
--- a/drivers/net/pcs/pcs-altera-tse.c
+++ b/drivers/net/pcs/pcs-altera-tse.c
@@ -12,22 +12,13 @@
/* SGMII PCS register addresses
*/
-#define SGMII_PCS_SCRATCH 0x10
-#define SGMII_PCS_REV 0x11
#define SGMII_PCS_LINK_TIMER_0 0x12
-#define SGMII_PCS_LINK_TIMER_REG(x) (0x12 + (x))
#define SGMII_PCS_LINK_TIMER_1 0x13
#define SGMII_PCS_IF_MODE 0x14
#define PCS_IF_MODE_SGMII_ENA BIT(0)
#define PCS_IF_MODE_USE_SGMII_AN BIT(1)
-#define PCS_IF_MODE_SGMI_SPEED_MASK GENMASK(3, 2)
-#define PCS_IF_MODE_SGMI_SPEED_10 (0 << 2)
-#define PCS_IF_MODE_SGMI_SPEED_100 (1 << 2)
-#define PCS_IF_MODE_SGMI_SPEED_1000 (2 << 2)
#define PCS_IF_MODE_SGMI_HALF_DUPLEX BIT(4)
#define PCS_IF_MODE_SGMI_PHY_AN BIT(5)
-#define SGMII_PCS_DIS_READ_TO 0x15
-#define SGMII_PCS_READ_TO 0x16
#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
struct altera_tse_pcs {
@@ -60,7 +51,6 @@ static void tse_pcs_write(struct altera_tse_pcs *tse_pcs, int regnum,
static int tse_pcs_reset(struct altera_tse_pcs *tse_pcs)
{
- int i = 0;
u16 bmcr;
/* Reset PCS block */
@@ -68,13 +58,9 @@ static int tse_pcs_reset(struct altera_tse_pcs *tse_pcs)
bmcr |= BMCR_RESET;
tse_pcs_write(tse_pcs, MII_BMCR, bmcr);
- for (i = 0; i < SGMII_PCS_SW_RESET_TIMEOUT; i++) {
- if (!(tse_pcs_read(tse_pcs, MII_BMCR) & BMCR_RESET))
- return 0;
- udelay(1);
- }
-
- return -ETIMEDOUT;
+ return read_poll_timeout(tse_pcs_read, bmcr, (bmcr & BMCR_RESET),
+ 10, SGMII_PCS_SW_RESET_TIMEOUT, 1,
+ tse_pcs, MII_BMCR);
}
static int alt_tse_pcs_validate(struct phylink_pcs *pcs,
@@ -107,7 +93,6 @@ static int alt_tse_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
if_mode |= PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA;
} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
if_mode &= ~(PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA);
- if_mode |= PCS_IF_MODE_SGMI_SPEED_1000;
}
ctrl |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
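The open-coded reset-poll loop is replaced by read_poll_timeout() from <linux/iopoll.h>, which repeatedly evaluates a read operation into a variable until a condition holds or the timeout expires. A minimal sketch of the helper's shape with hypothetical names, polling for a self-clearing reset bit:

    u16 val;
    int ret;

    /* Read example_read(example_dev, MII_BMCR) into val every 10 us,
     * sleeping once before the first read, until BMCR_RESET clears or
     * 100 us elapse; returns -ETIMEDOUT on timeout.
     */
    ret = read_poll_timeout(example_read, val, !(val & BMCR_RESET),
                            10, 100, true, example_dev, MII_BMCR);
    if (ret)
        return ret;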
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 70f88eae2a9e..f6a038a1d51e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -188,18 +188,12 @@ static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat,
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg)
{
- struct mii_bus *bus = xpcs->mdiodev->bus;
- int addr = xpcs->mdiodev->addr;
-
- return mdiobus_c45_read(bus, addr, dev, reg);
+ return mdiodev_c45_read(xpcs->mdiodev, dev, reg);
}
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
{
- struct mii_bus *bus = xpcs->mdiodev->bus;
- int addr = xpcs->mdiodev->addr;
-
- return mdiobus_c45_write(bus, addr, dev, reg, val);
+ return mdiodev_c45_write(xpcs->mdiodev, dev, reg, val);
}
static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c57a0262fb64..af00cf44cd97 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -260,7 +260,7 @@ config MOTORCOMM_PHY
tristate "Motorcomm PHYs"
help
Enables support for Motorcomm network PHYs.
- Currently supports the YT8511 gigabit PHY.
+ Currently supports the YT8511, YT8521 and YT8531S Gigabit Ethernet PHYs.
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 47a76df36b74..334a6904ca5a 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -22,6 +22,8 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR112 0x03a1b662
+#define PHY_ID_AQR412 0x03a1b712
#define PHY_ID_AQR113C 0x31c31c12
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
@@ -801,6 +803,42 @@ static struct phy_driver aqr_driver[] = {
.read_status = aqr_read_status,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
+ .name = "Aquantia AQR112",
+ .probe = aqr107_probe,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .read_status = aqr107_read_status,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
+ .name = "Aquantia AQR412",
+ .probe = aqr107_probe,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .read_status = aqr107_read_status,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
@@ -831,6 +869,8 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ }
};
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index b60db8b6f477..a6f05e35d91f 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -524,6 +524,8 @@ static int dp83822_read_straps(struct phy_device *phydev)
if (val < 0)
return val;
+ phydev_dbg(phydev, "SOR1 strap register: 0x%04x\n", val);
+
fx_enabled = (val & DP83822_COL_STRAP_MASK) >> DP83822_COL_SHIFT;
if (fx_enabled == DP83822_STRAP_MODE2 ||
fx_enabled == DP83822_STRAP_MODE3)
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 7446d5c6c714..89cd821f1f46 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -940,6 +940,12 @@ static void dp83867_link_change_notify(struct phy_device *phydev)
}
}
+static int dp83867_loopback(struct phy_device *phydev, bool enable)
+{
+ return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ enable ? BMCR_LOOPBACK : 0);
+}
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -966,6 +972,7 @@ static struct phy_driver dp83867_driver[] = {
.resume = genphy_resume,
.link_change_notify = dp83867_link_change_notify,
+ .set_loopback = dp83867_loopback,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 54a17b576eac..26ce0c5defcd 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1295,6 +1295,81 @@ static int ksz9131_config_init(struct phy_device *phydev)
return 0;
}
+#define MII_KSZ9131_AUTO_MDIX 0x1C
+#define MII_KSZ9131_AUTO_MDI_SET BIT(7)
+#define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
+
+static int ksz9131_mdix_update(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
+ if (ret & MII_KSZ9131_AUTO_MDI_SET)
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ }
+
+ if (ret & MII_KSZ9131_AUTO_MDI_SET)
+ phydev->mdix = ETH_TP_MDI;
+ else
+ phydev->mdix = ETH_TP_MDI_X;
+
+ return 0;
+}
+
+static int ksz9131_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = MII_KSZ9131_AUTO_MDIX_SWAP_OFF |
+ MII_KSZ9131_AUTO_MDI_SET;
+ break;
+ case ETH_TP_MDI_X:
+ val = MII_KSZ9131_AUTO_MDIX_SWAP_OFF;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify(phydev, MII_KSZ9131_AUTO_MDIX,
+ MII_KSZ9131_AUTO_MDIX_SWAP_OFF |
+ MII_KSZ9131_AUTO_MDI_SET, val);
+}
+
+static int ksz9131_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9131_mdix_update(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
+static int ksz9131_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9131_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4)
@@ -3304,6 +3379,8 @@ static struct phy_driver ksphy_driver[] = {
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
.config_intr = kszphy_config_intr,
+ .config_aneg = ksz9131_config_aneg,
+ .read_status = ksz9131_read_status,
.handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 7e6ac2c5e27e..685190db72de 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1,15 +1,107 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Driver for Motorcomm PHYs
+ * Motorcomm 8511/8521/8531S PHY driver.
*
* Author: Peter Geis <pgwipeout@gmail.com>
+ * Author: Frank <Frank.Sae@motor-comm.com>
*/
+#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
#define PHY_ID_YT8511 0x0000010a
+#define PHY_ID_YT8521 0x0000011A
+#define PHY_ID_YT8531S 0x4F51E91A
+
+/* YT8521/YT8531S Register Overview
+ * UTP Register space | FIBER Register space
+ * ------------------------------------------------------------
+ * | UTP MII | FIBER MII |
+ * | UTP MMD | |
+ * | UTP Extended | FIBER Extended |
+ * ------------------------------------------------------------
+ * | Common Extended |
+ * ------------------------------------------------------------
+ */
+
+/* 0x10 ~ 0x15, 0x1E and 0x1F are common MII registers of the YT PHY */
+
+/* Specific Function Control Register */
+#define YTPHY_SPECIFIC_FUNCTION_CONTROL_REG 0x10
+
+/* 2b00 Manual MDI configuration
+ * 2b01 Manual MDIX configuration
+ * 2b10 Reserved
+ * 2b11 Enable automatic crossover for all modes *default*
+ */
+#define YTPHY_SFCR_MDI_CROSSOVER_MODE_MASK (BIT(6) | BIT(5))
+#define YTPHY_SFCR_CROSSOVER_EN BIT(3)
+#define YTPHY_SFCR_SQE_TEST_EN BIT(2)
+#define YTPHY_SFCR_POLARITY_REVERSAL_EN BIT(1)
+#define YTPHY_SFCR_JABBER_DIS BIT(0)
+
+/* Specific Status Register */
+#define YTPHY_SPECIFIC_STATUS_REG 0x11
+#define YTPHY_SSR_SPEED_MODE_OFFSET 14
+
+#define YTPHY_SSR_SPEED_MODE_MASK (BIT(15) | BIT(14))
+#define YTPHY_SSR_SPEED_10M 0x0
+#define YTPHY_SSR_SPEED_100M 0x1
+#define YTPHY_SSR_SPEED_1000M 0x2
+#define YTPHY_SSR_DUPLEX_OFFSET 13
+#define YTPHY_SSR_DUPLEX BIT(13)
+#define YTPHY_SSR_PAGE_RECEIVED BIT(12)
+#define YTPHY_SSR_SPEED_DUPLEX_RESOLVED BIT(11)
+#define YTPHY_SSR_LINK BIT(10)
+#define YTPHY_SSR_MDIX_CROSSOVER BIT(6)
+#define YTPHY_SSR_DOWNGRADE BIT(5)
+#define YTPHY_SSR_TRANSMIT_PAUSE BIT(3)
+#define YTPHY_SSR_RECEIVE_PAUSE BIT(2)
+#define YTPHY_SSR_POLARITY BIT(1)
+#define YTPHY_SSR_JABBER BIT(0)
+
+/* Interrupt enable Register */
+#define YTPHY_INTERRUPT_ENABLE_REG 0x12
+#define YTPHY_IER_WOL BIT(6)
+
+/* Interrupt Status Register */
+#define YTPHY_INTERRUPT_STATUS_REG 0x13
+#define YTPHY_ISR_AUTONEG_ERR BIT(15)
+#define YTPHY_ISR_SPEED_CHANGED BIT(14)
+#define YTPHY_ISR_DUPLEX_CHANGED BIT(13)
+#define YTPHY_ISR_PAGE_RECEIVED BIT(12)
+#define YTPHY_ISR_LINK_FAILED BIT(11)
+#define YTPHY_ISR_LINK_SUCCESSED BIT(10)
+#define YTPHY_ISR_WOL BIT(6)
+#define YTPHY_ISR_WIRESPEED_DOWNGRADE BIT(5)
+#define YTPHY_ISR_SERDES_LINK_FAILED BIT(3)
+#define YTPHY_ISR_SERDES_LINK_SUCCESSED BIT(2)
+#define YTPHY_ISR_POLARITY_CHANGED BIT(1)
+#define YTPHY_ISR_JABBER_HAPPENED BIT(0)
+
+/* Speed Auto Downgrade Control Register */
+#define YTPHY_SPEED_AUTO_DOWNGRADE_CONTROL_REG 0x14
+#define YTPHY_SADCR_SPEED_DOWNGRADE_EN BIT(5)
+
+/* If these bits are set to 3, the PHY attempts five times (3 (set value) +
+ * 2 additional) before downgrading; default 0x3
+ */
+#define YTPHY_SADCR_SPEED_RETRY_LIMIT (0x3 << 2)
+
+/* Rx Error Counter Register */
+#define YTPHY_RX_ERROR_COUNTER_REG 0x15
+
+/* Extended Register's Address Offset Register */
+#define YTPHY_PAGE_SELECT 0x1E
+
+/* Extended Register's Data Register */
+#define YTPHY_PAGE_DATA 0x1F
+
+/* FIBER Auto-Negotiation link partner ability */
+#define YTPHY_FLPA_PAUSE (0x3 << 7)
+#define YTPHY_FLPA_ASYM_PAUSE (0x2 << 7)
#define YT8511_PAGE_SELECT 0x1e
#define YT8511_PAGE 0x1f
@@ -38,6 +130,355 @@
#define YT8511_DELAY_FE_TX_EN (0xf << 12)
#define YT8511_DELAY_FE_TX_DIS (0x2 << 12)
+/* Extended registers are different from MMD registers and MII registers.
+ * Use the ytphy_read_ext/ytphy_write_ext/ytphy_modify_ext functions to
+ * operate on extended registers.
+ * Extended Register start
+ */
+
+/* Phy gmii clock gating Register */
+#define YT8521_CLOCK_GATING_REG 0xC
+#define YT8521_CGR_RX_CLK_EN BIT(12)
+
+#define YT8521_EXTREG_SLEEP_CONTROL1_REG 0x27
+#define YT8521_ESC1R_SLEEP_SW BIT(15)
+#define YT8521_ESC1R_PLLON_SLP BIT(14)
+
+/* Phy fiber Link timer cfg2 Register */
+#define YT8521_LINK_TIMER_CFG2_REG 0xA5
+#define YT8521_LTCR_EN_AUTOSEN BIT(15)
+
+/* 0xA000, 0xA001, 0xA003, 0xA006 ~ 0xA00A and 0xA012 are common ext registers
+ * of yt8521 phy. There is no need to switch reg space when operating these
+ * registers.
+ */
+
+#define YT8521_REG_SPACE_SELECT_REG 0xA000
+#define YT8521_RSSR_SPACE_MASK BIT(1)
+#define YT8521_RSSR_FIBER_SPACE (0x1 << 1)
+#define YT8521_RSSR_UTP_SPACE (0x0 << 1)
+#define YT8521_RSSR_TO_BE_ARBITRATED (0xFF)
+
+#define YT8521_CHIP_CONFIG_REG 0xA001
+#define YT8521_CCR_SW_RST BIT(15)
+
+#define YT8521_CCR_MODE_SEL_MASK (BIT(2) | BIT(1) | BIT(0))
+#define YT8521_CCR_MODE_UTP_TO_RGMII 0
+#define YT8521_CCR_MODE_FIBER_TO_RGMII 1
+#define YT8521_CCR_MODE_UTP_FIBER_TO_RGMII 2
+#define YT8521_CCR_MODE_UTP_TO_SGMII 3
+#define YT8521_CCR_MODE_SGPHY_TO_RGMAC 4
+#define YT8521_CCR_MODE_SGMAC_TO_RGPHY 5
+#define YT8521_CCR_MODE_UTP_TO_FIBER_AUTO 6
+#define YT8521_CCR_MODE_UTP_TO_FIBER_FORCE 7
+
+/* 3 phy polling modes; poll mode combines utp and fiber mode */
+#define YT8521_MODE_FIBER 0x1
+#define YT8521_MODE_UTP 0x2
+#define YT8521_MODE_POLL 0x3
+
+#define YT8521_RGMII_CONFIG1_REG 0xA003
+
+/* TX Gig-E Delay is bits 3:0, default 0x1
+ * TX Fast-E Delay is bits 7:4, default 0xf
+ * RX Delay is bits 13:10, default 0x0
+ * Delay = 150ps * N
+ * On = 2250ps, off = 0ps
+ */
+#define YT8521_RC1R_RX_DELAY_MASK (0xF << 10)
+#define YT8521_RC1R_RX_DELAY_EN (0xF << 10)
+#define YT8521_RC1R_RX_DELAY_DIS (0x0 << 10)
+#define YT8521_RC1R_FE_TX_DELAY_MASK (0xF << 4)
+#define YT8521_RC1R_FE_TX_DELAY_EN (0xF << 4)
+#define YT8521_RC1R_FE_TX_DELAY_DIS (0x0 << 4)
+#define YT8521_RC1R_GE_TX_DELAY_MASK (0xF << 0)
+#define YT8521_RC1R_GE_TX_DELAY_EN (0xF << 0)
+#define YT8521_RC1R_GE_TX_DELAY_DIS (0x0 << 0)
+
+#define YTPHY_MISC_CONFIG_REG 0xA006
+#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0)
+#define YTPHY_MCR_FIBER_1000BX (0x1 << 0)
+#define YTPHY_MCR_FIBER_100FX (0x0 << 0)
+
+/* WOL MAC ADDR: MACADDR2(highest), MACADDR1(middle), MACADDR0(lowest) */
+#define YTPHY_WOL_MACADDR2_REG 0xA007
+#define YTPHY_WOL_MACADDR1_REG 0xA008
+#define YTPHY_WOL_MACADDR0_REG 0xA009
+
+#define YTPHY_WOL_CONFIG_REG 0xA00A
+#define YTPHY_WCR_INTR_SEL BIT(6)
+#define YTPHY_WCR_ENABLE BIT(3)
+
+/* 2b00 84ms
+ * 2b01 168ms *default*
+ * 2b10 336ms
+ * 2b11 672ms
+ */
+#define YTPHY_WCR_PULSE_WIDTH_MASK (BIT(2) | BIT(1))
+#define YTPHY_WCR_PULSE_WIDTH_672MS (BIT(2) | BIT(1))
+
+/* 1b0 Interrupt and WOL events are level triggered and active LOW *default*
+ * 1b1 Interrupt and WOL events are pulse triggered and active LOW
+ */
+#define YTPHY_WCR_TYPE_PULSE BIT(0)
+
+#define YT8531S_SYNCE_CFG_REG 0xA012
+#define YT8531S_SCR_SYNCE_ENABLE BIT(6)
+
+/* Extended Register end */
+
+struct yt8521_priv {
+	/* combo_advertising is used when the YT8521 is in combo mode; this
+	 * means the yt8521 may work in utp or fiber mode, depending
+ * on which media is connected (YT8521_RSSR_TO_BE_ARBITRATED).
+ */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(combo_advertising);
+
+ /* YT8521_MODE_FIBER / YT8521_MODE_UTP / YT8521_MODE_POLL*/
+ u8 polling_mode;
+ u8 strap_mode; /* 8 working modes */
+ /* current reg page of yt8521 phy:
+ * YT8521_RSSR_UTP_SPACE
+ * YT8521_RSSR_FIBER_SPACE
+ * YT8521_RSSR_TO_BE_ARBITRATED
+ */
+ u8 reg_page;
+};
+
+/**
+ * ytphy_read_ext() - read a PHY's extended register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number to read
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns the value of regnum reg or negative error code
+ */
+static int ytphy_read_ext(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_read(phydev, YTPHY_PAGE_DATA);
+}
+
+/**
+ * ytphy_read_ext_with_lock() - read a PHY's extended register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number to read
+ *
+ * returns the value of regnum reg or negative error code
+ */
+static int ytphy_read_ext_with_lock(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = ytphy_read_ext(phydev, regnum);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * ytphy_write_ext() - write a PHY's extended register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative error code
+ */
+static int ytphy_write_ext(struct phy_device *phydev, u16 regnum, u16 val)
+{
+ int ret;
+
+ ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_write(phydev, YTPHY_PAGE_DATA, val);
+}
+
+/**
+ * ytphy_write_ext_with_lock() - write a PHY's extended register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * returns 0 or negative error code
+ */
+static int ytphy_write_ext_with_lock(struct phy_device *phydev, u16 regnum,
+ u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = ytphy_write_ext(phydev, regnum, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * ytphy_modify_ext() - bits modify a PHY's extended register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's extended register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ * The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative error code
+ */
+static int ytphy_modify_ext(struct phy_device *phydev, u16 regnum, u16 mask,
+ u16 set)
+{
+ int ret;
+
+ ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_modify(phydev, YTPHY_PAGE_DATA, mask, set);
+}
+
+/**
+ * ytphy_modify_ext_with_lock() - bits modify a PHY's extended register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's extended register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ *
+ * returns 0 or negative error code
+ */
+static int ytphy_modify_ext_with_lock(struct phy_device *phydev, u16 regnum,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = ytphy_modify_ext(phydev, regnum, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * ytphy_get_wol() - report whether wake-on-lan is enabled
+ * @phydev: a pointer to a &struct phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ *
+ * NOTE: YTPHY_WOL_CONFIG_REG is common ext reg.
+ */
+static void ytphy_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int wol_config;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG);
+ if (wol_config < 0)
+ return;
+
+ if (wol_config & YTPHY_WCR_ENABLE)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+/**
+ * ytphy_set_wol() - turn wake-on-lan on or off
+ * @phydev: a pointer to a &struct phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ *
+ * NOTE: YTPHY_WOL_CONFIG_REG, YTPHY_WOL_MACADDR2_REG, YTPHY_WOL_MACADDR1_REG
+ * and YTPHY_WOL_MACADDR0_REG are common ext regs. The
+ * YTPHY_INTERRUPT_ENABLE_REG of UTP is special: fiber also uses this register.
+ *
+ * returns 0 or negative errno code
+ */
+static int ytphy_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ struct net_device *p_attached_dev;
+ const u16 mac_addr_reg[] = {
+ YTPHY_WOL_MACADDR2_REG,
+ YTPHY_WOL_MACADDR1_REG,
+ YTPHY_WOL_MACADDR0_REG,
+ };
+ const u8 *mac_addr;
+ int old_page;
+ int ret = 0;
+ u16 mask;
+ u16 val;
+ u8 i;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ p_attached_dev = phydev->attached_dev;
+ if (!p_attached_dev)
+ return -ENODEV;
+
+ mac_addr = (const u8 *)p_attached_dev->dev_addr;
+ if (!is_valid_ether_addr(mac_addr))
+ return -EINVAL;
+
+ /* lock mdio bus then switch to utp reg space */
+ old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
+ if (old_page < 0)
+ goto err_restore_page;
+
+ /* Store the device address for the magic packet */
+ for (i = 0; i < 3; i++) {
+ ret = ytphy_write_ext(phydev, mac_addr_reg[i],
+ ((mac_addr[i * 2] << 8)) |
+ (mac_addr[i * 2 + 1]));
+ if (ret < 0)
+ goto err_restore_page;
+ }
+
+ /* Enable WOL feature */
+ mask = YTPHY_WCR_PULSE_WIDTH_MASK | YTPHY_WCR_INTR_SEL;
+ val = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+ val |= YTPHY_WCR_TYPE_PULSE | YTPHY_WCR_PULSE_WIDTH_672MS;
+ ret = ytphy_modify_ext(phydev, YTPHY_WOL_CONFIG_REG, mask, val);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* Enable WOL interrupt */
+ ret = __phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, 0,
+ YTPHY_IER_WOL);
+ if (ret < 0)
+ goto err_restore_page;
+
+ } else {
+ old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
+ if (old_page < 0)
+ goto err_restore_page;
+
+ /* Disable WOL feature */
+ mask = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+		ret = ytphy_modify_ext(phydev, YTPHY_WOL_CONFIG_REG, mask, 0);
+		if (ret < 0)
+			goto err_restore_page;
+
+ /* Disable WOL interrupt */
+ ret = __phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG,
+ YTPHY_IER_WOL, 0);
+ if (ret < 0)
+ goto err_restore_page;
+ }
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
static int yt8511_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, YT8511_PAGE_SELECT);
@@ -111,6 +552,1201 @@ err_restore_page:
return phy_restore_page(phydev, oldpage, ret);
}
+/**
+ * yt8521_read_page() - read reg page
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns current reg space of yt8521 (YT8521_RSSR_FIBER_SPACE/
+ * YT8521_RSSR_UTP_SPACE) or negative errno code
+ */
+static int yt8521_read_page(struct phy_device *phydev)
+{
+ int old_page;
+
+ old_page = ytphy_read_ext(phydev, YT8521_REG_SPACE_SELECT_REG);
+ if (old_page < 0)
+ return old_page;
+
+ if ((old_page & YT8521_RSSR_SPACE_MASK) == YT8521_RSSR_FIBER_SPACE)
+ return YT8521_RSSR_FIBER_SPACE;
+
+ return YT8521_RSSR_UTP_SPACE;
+}
+
+/**
+ * yt8521_write_page() - write reg page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to write.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_write_page(struct phy_device *phydev, int page)
+{
+ int mask = YT8521_RSSR_SPACE_MASK;
+ int set;
+
+ if ((page & YT8521_RSSR_SPACE_MASK) == YT8521_RSSR_FIBER_SPACE)
+ set = YT8521_RSSR_FIBER_SPACE;
+ else
+ set = YT8521_RSSR_UTP_SPACE;
+
+ return ytphy_modify_ext(phydev, YT8521_REG_SPACE_SELECT_REG, mask, set);
+}
+
+/**
+ * yt8521_probe() - read chip config then set suitable polling_mode
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct yt8521_priv *priv;
+ int chip_config;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ chip_config = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
+ if (chip_config < 0)
+ return chip_config;
+
+ priv->strap_mode = chip_config & YT8521_CCR_MODE_SEL_MASK;
+ switch (priv->strap_mode) {
+ case YT8521_CCR_MODE_FIBER_TO_RGMII:
+ case YT8521_CCR_MODE_SGPHY_TO_RGMAC:
+ case YT8521_CCR_MODE_SGMAC_TO_RGPHY:
+ priv->polling_mode = YT8521_MODE_FIBER;
+ priv->reg_page = YT8521_RSSR_FIBER_SPACE;
+ phydev->port = PORT_FIBRE;
+ break;
+ case YT8521_CCR_MODE_UTP_FIBER_TO_RGMII:
+ case YT8521_CCR_MODE_UTP_TO_FIBER_AUTO:
+ case YT8521_CCR_MODE_UTP_TO_FIBER_FORCE:
+ priv->polling_mode = YT8521_MODE_POLL;
+ priv->reg_page = YT8521_RSSR_TO_BE_ARBITRATED;
+ phydev->port = PORT_NONE;
+ break;
+ case YT8521_CCR_MODE_UTP_TO_SGMII:
+ case YT8521_CCR_MODE_UTP_TO_RGMII:
+ priv->polling_mode = YT8521_MODE_UTP;
+ priv->reg_page = YT8521_RSSR_UTP_SPACE;
+ phydev->port = PORT_TP;
+ break;
+ }
+ /* set default reg space */
+ if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) {
+ ret = ytphy_write_ext_with_lock(phydev,
+ YT8521_REG_SPACE_SELECT_REG,
+ priv->reg_page);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * yt8531s_probe() - read chip config then set suitable polling_mode
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8531s_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable SyncE clock output by default */
+ ret = ytphy_modify_ext_with_lock(phydev, YT8531S_SYNCE_CFG_REG,
+ YT8531S_SCR_SYNCE_ENABLE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* same as yt8521_probe */
+ return yt8521_probe(phydev);
+}
+
+/**
+ * ytphy_utp_read_lpa() - read LPA then setup lp_advertising for utp
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative errno code
+ */
+static int ytphy_utp_read_lpa(struct phy_device *phydev)
+{
+ int lpa, lpagb;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (!phydev->autoneg_complete) {
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ 0);
+ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ return 0;
+ }
+
+ if (phydev->is_gigabit_capable) {
+ lpagb = __phy_read(phydev, MII_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
+
+ if (lpagb & LPA_1000MSFAIL) {
+ int adv = __phy_read(phydev, MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
+ if (adv & CTL1000_ENABLE_MASTER)
+ phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
+ else
+ phydev_err(phydev, "Master/Slave resolution failed\n");
+ return -ENOLINK;
+ }
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpagb);
+ }
+
+ lpa = __phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
+ } else {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ return 0;
+}
+
+/**
+ * yt8521_adjust_status() - update speed and duplex in phydev; in fiber
+ * mode, duplex is always full.
+ * @phydev: a pointer to a &struct phy_device
+ * @status: yt8521 status read from YTPHY_SPECIFIC_STATUS_REG
+ * @is_utp: false(yt8521 work in fiber mode) or true(yt8521 work in utp mode)
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns 0
+ */
+static int yt8521_adjust_status(struct phy_device *phydev, int status,
+ bool is_utp)
+{
+ int speed_mode, duplex;
+ int speed;
+ int err;
+ int lpa;
+
+ if (is_utp)
+ duplex = (status & YTPHY_SSR_DUPLEX) >> YTPHY_SSR_DUPLEX_OFFSET;
+ else
+		duplex = DUPLEX_FULL; /* for fiber, it is always DUPLEX_FULL */
+
+ speed_mode = (status & YTPHY_SSR_SPEED_MODE_MASK) >>
+ YTPHY_SSR_SPEED_MODE_OFFSET;
+
+ switch (speed_mode) {
+ case YTPHY_SSR_SPEED_10M:
+ if (is_utp)
+ speed = SPEED_10;
+ else
+			/* in fiber mode this case never happens; default to
+			 * SPEED_UNKNOWN
+ */
+ speed = SPEED_UNKNOWN;
+ break;
+ case YTPHY_SSR_SPEED_100M:
+ speed = SPEED_100;
+ break;
+ case YTPHY_SSR_SPEED_1000M:
+ speed = SPEED_1000;
+ break;
+ default:
+ speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+
+ if (is_utp) {
+ err = ytphy_utp_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ phy_resolve_aneg_pause(phydev);
+ } else {
+ lpa = __phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ /* only support 1000baseX Full */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->lp_advertising, lpa & LPA_1000XFULL);
+
+ if (!(lpa & YTPHY_FLPA_PAUSE)) {
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ } else if ((lpa & YTPHY_FLPA_ASYM_PAUSE)) {
+ phydev->pause = 1;
+ phydev->asym_pause = 1;
+ } else {
+ phydev->pause = 1;
+ phydev->asym_pause = 0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * yt8521_read_status_paged() - determines the speed and duplex of one page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to
+ * operate.
+ *
+ * returns 1 (utp or fiber link),0 (no link) or negative errno code
+ */
+static int yt8521_read_status_paged(struct phy_device *phydev, int page)
+{
+ int fiber_latch_val;
+ int fiber_curr_val;
+ int old_page;
+ int ret = 0;
+ int status;
+ int link;
+
+ linkmode_zero(phydev->lp_advertising);
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->asym_pause = 0;
+ phydev->pause = 0;
+
+	/* YT8521 has two reg spaces (utp/fiber) for link up with utp/fiber
+	 * respectively. But for utp/fiber combo mode, the reg space should be
+	 * arbitrated based on media priority. By default, utp takes
+	 * priority. The reg space should be properly set before reading
+	 * YTPHY_SPECIFIC_STATUS_REG.
+ */
+
+ page &= YT8521_RSSR_SPACE_MASK;
+ old_page = phy_select_page(phydev, page);
+ if (old_page < 0)
+ goto err_restore_page;
+
+	/* Read YTPHY_SPECIFIC_STATUS_REG, which indicates the speed and duplex
+	 * the PHY is actually using.
+ */
+ ret = __phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG);
+ if (ret < 0)
+ goto err_restore_page;
+
+ status = ret;
+ link = !!(status & YTPHY_SSR_LINK);
+
+	/* When the PHY is in fiber mode and the speed changes from 1000Mbps
+	 * to 100Mbps, YTPHY_SPECIFIC_STATUS_REG does not report link down, so
+	 * we need to check MII_BMSR to identify such a case.
+ */
+ if (page == YT8521_RSSR_FIBER_SPACE) {
+ ret = __phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ goto err_restore_page;
+
+ fiber_latch_val = ret;
+ ret = __phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ goto err_restore_page;
+
+ fiber_curr_val = ret;
+ if (link && fiber_latch_val != fiber_curr_val) {
+ link = 0;
+ phydev_info(phydev,
+ "%s, fiber link down detect, latch = %04x, curr = %04x\n",
+ __func__, fiber_latch_val, fiber_curr_val);
+ }
+ } else {
+ /* Read autonegotiation status */
+ ret = __phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ goto err_restore_page;
+
+ phydev->autoneg_complete = ret & BMSR_ANEGCOMPLETE ? 1 : 0;
+ }
+
+ if (link) {
+ if (page == YT8521_RSSR_UTP_SPACE)
+ yt8521_adjust_status(phydev, status, true);
+ else
+ yt8521_adjust_status(phydev, status, false);
+ }
+ return phy_restore_page(phydev, old_page, link);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8521_read_status() - determines the negotiated speed and duplex
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_read_status(struct phy_device *phydev)
+{
+ struct yt8521_priv *priv = phydev->priv;
+ int link_fiber = 0;
+ int link_utp;
+ int link;
+ int ret;
+
+ if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) {
+ link = yt8521_read_status_paged(phydev, priv->reg_page);
+ if (link < 0)
+ return link;
+ } else {
+		/* When page is YT8521_RSSR_TO_BE_ARBITRATED, arbitration is
+		 * needed. By default, utp has higher priority.
+ */
+
+ link_utp = yt8521_read_status_paged(phydev,
+ YT8521_RSSR_UTP_SPACE);
+ if (link_utp < 0)
+ return link_utp;
+
+ if (!link_utp) {
+ link_fiber = yt8521_read_status_paged(phydev,
+ YT8521_RSSR_FIBER_SPACE);
+ if (link_fiber < 0)
+ return link_fiber;
+ }
+
+ link = link_utp || link_fiber;
+ }
+
+ if (link) {
+ if (phydev->link == 0) {
+ /* arbitrate reg space based on linkup media type. */
+ if (priv->polling_mode == YT8521_MODE_POLL &&
+ priv->reg_page == YT8521_RSSR_TO_BE_ARBITRATED) {
+ if (link_fiber)
+ priv->reg_page =
+ YT8521_RSSR_FIBER_SPACE;
+ else
+ priv->reg_page = YT8521_RSSR_UTP_SPACE;
+
+ ret = ytphy_write_ext_with_lock(phydev,
+ YT8521_REG_SPACE_SELECT_REG,
+ priv->reg_page);
+ if (ret < 0)
+ return ret;
+
+ phydev->port = link_fiber ? PORT_FIBRE : PORT_TP;
+
+ phydev_info(phydev, "%s, link up, media: %s\n",
+ __func__,
+ (phydev->port == PORT_TP) ?
+ "UTP" : "Fiber");
+ }
+ }
+ phydev->link = 1;
+ } else {
+ if (phydev->link == 1) {
+ phydev_info(phydev, "%s, link down, media: %s\n",
+ __func__, (phydev->port == PORT_TP) ?
+ "UTP" : "Fiber");
+
+			/* When in YT8521_MODE_POLL mode, prepare for the next
+			 * arbitration.
+ */
+ if (priv->polling_mode == YT8521_MODE_POLL) {
+ priv->reg_page = YT8521_RSSR_TO_BE_ARBITRATED;
+ phydev->port = PORT_NONE;
+ }
+ }
+
+ phydev->link = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * yt8521_modify_bmcr_paged - bits modify a PHY's BMCR register of one page
+ * @phydev: the phy_device struct
+ * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to operate
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's BMCR register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ * YT8521 has two reg spaces (utp/fiber) and three modes (utp/fiber/poll); each
+ * space has its own MII_BMCR. Poll mode combines utp and fiber, so both need
+ * to be modified.
+ * If the modification is a reset, the function waits for it to complete.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_modify_bmcr_paged(struct phy_device *phydev, int page,
+ u16 mask, u16 set)
+{
+ int max_cnt = 500; /* the max wait time of reset ~ 500 ms */
+ int old_page;
+ int ret = 0;
+
+ old_page = phy_select_page(phydev, page & YT8521_RSSR_SPACE_MASK);
+ if (old_page < 0)
+ goto err_restore_page;
+
+ ret = __phy_modify(phydev, MII_BMCR, mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* If it is reset, need to wait for the reset to complete */
+ if (set == BMCR_RESET) {
+ while (max_cnt--) {
+ usleep_range(1000, 1100);
+ ret = __phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ goto err_restore_page;
+
+ if (!(ret & BMCR_RESET))
+ return phy_restore_page(phydev, old_page, 0);
+ }
+ }
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8521_modify_utp_fiber_bmcr - bits modify a PHY's BMCR register
+ * @phydev: the phy_device struct
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's BMCR register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ * YT8521 has two reg spaces (utp/fiber) and three modes (utp/fiber/poll); each
+ * space has its own MII_BMCR. Poll mode combines utp and fiber, so both need
+ * to be modified.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_modify_utp_fiber_bmcr(struct phy_device *phydev, u16 mask,
+ u16 set)
+{
+ struct yt8521_priv *priv = phydev->priv;
+ int ret;
+
+ if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) {
+ ret = yt8521_modify_bmcr_paged(phydev, priv->reg_page, mask,
+ set);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_UTP_SPACE,
+ mask, set);
+ if (ret < 0)
+ return ret;
+
+ ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_FIBER_SPACE,
+ mask, set);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * yt8521_soft_reset() - called to issue a PHY software reset
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_soft_reset(struct phy_device *phydev)
+{
+ return yt8521_modify_utp_fiber_bmcr(phydev, 0, BMCR_RESET);
+}
+
+/**
+ * yt8521_suspend() - suspend the hardware
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_suspend(struct phy_device *phydev)
+{
+ int wol_config;
+
+ /* YTPHY_WOL_CONFIG_REG is common ext reg */
+ wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG);
+ if (wol_config < 0)
+ return wol_config;
+
+	/* if wol is enabled, do nothing */
+ if (wol_config & YTPHY_WCR_ENABLE)
+ return 0;
+
+ return yt8521_modify_utp_fiber_bmcr(phydev, 0, BMCR_PDOWN);
+}
+
+/**
+ * yt8521_resume() - resume the hardware
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_resume(struct phy_device *phydev)
+{
+ int ret;
+ int wol_config;
+
+ /* disable auto sleep */
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ return ret;
+
+ wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG);
+ if (wol_config < 0)
+ return wol_config;
+
+	/* if wol is enabled, do nothing */
+ if (wol_config & YTPHY_WCR_ENABLE)
+ return 0;
+
+ return yt8521_modify_utp_fiber_bmcr(phydev, BMCR_PDOWN, 0);
+}
+
+/**
+ * yt8521_config_init() - called to initialize the PHY
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_config_init(struct phy_device *phydev)
+{
+ int old_page;
+ int ret = 0;
+ u16 val;
+
+ old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
+ if (old_page < 0)
+ goto err_restore_page;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
+ val |= YT8521_RC1R_RX_DELAY_DIS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
+ val |= YT8521_RC1R_RX_DELAY_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
+ val |= YT8521_RC1R_RX_DELAY_DIS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
+ val |= YT8521_RC1R_RX_DELAY_EN;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ break;
+ default: /* do not support other modes */
+ ret = -EOPNOTSUPP;
+ goto err_restore_page;
+ }
+
+ /* set rgmii delay mode */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII) {
+ ret = ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG,
+ (YT8521_RC1R_RX_DELAY_MASK |
+ YT8521_RC1R_FE_TX_DELAY_MASK |
+ YT8521_RC1R_GE_TX_DELAY_MASK),
+ val);
+ if (ret < 0)
+ goto err_restore_page;
+ }
+
+ /* disable auto sleep */
+ ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* enable RXC clock when no wire plug */
+ ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG,
+ YT8521_CGR_RX_CLK_EN, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8521_prepare_fiber_features() - A small helper function that sets up
+ * fiber's features.
+ * @phydev: a pointer to a &struct phy_device
+ * @dst: a pointer to store fiber's features
+ */
+static void yt8521_prepare_fiber_features(struct phy_device *phydev,
+ unsigned long *dst)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, dst);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, dst);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, dst);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, dst);
+}
+
+/**
+ * yt8521_fiber_setup_forced - configures/forces speed from @phydev
+ * @phydev: target phy_device struct
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_fiber_setup_forced(struct phy_device *phydev)
+{
+ u16 val;
+ int ret;
+
+ if (phydev->speed == SPEED_1000)
+ val = YTPHY_MCR_FIBER_1000BX;
+ else if (phydev->speed == SPEED_100)
+ val = YTPHY_MCR_FIBER_100FX;
+ else
+ return -EINVAL;
+
+ ret = __phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* disable Fiber auto sensing */
+ ret = ytphy_modify_ext(phydev, YT8521_LINK_TIMER_CFG2_REG,
+ YT8521_LTCR_EN_AUTOSEN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ytphy_modify_ext(phydev, YTPHY_MISC_CONFIG_REG,
+ YTPHY_MCR_FIBER_SPEED_MASK, val);
+ if (ret < 0)
+ return ret;
+
+ return ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_SW_RST, 0);
+}
+
+/**
+ * ytphy_check_and_restart_aneg - Enable and restart auto-negotiation
+ * @phydev: target phy_device struct
+ * @restart: whether aneg restart is requested
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative errno code
+ */
+static int ytphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
+{
+ int ret;
+
+ if (!restart) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ ret = __phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & BMCR_ANENABLE) || (ret & BMCR_ISOLATE))
+ restart = true;
+ }
+ /* Enable and Restart Autonegotiation
+ * Don't isolate the PHY if we're negotiating
+ */
+ if (restart)
+ return __phy_modify(phydev, MII_BMCR, BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+
+ return 0;
+}
+
+/**
+ * yt8521_fiber_config_aneg - restart auto-negotiation or write
+ * YTPHY_MISC_CONFIG_REG.
+ * @phydev: target phy_device struct
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_fiber_config_aneg(struct phy_device *phydev)
+{
+ int err, changed = 0;
+ int bmcr;
+ u16 adv;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return yt8521_fiber_setup_forced(phydev);
+
+ /* enable Fiber auto sensing */
+ err = ytphy_modify_ext(phydev, YT8521_LINK_TIMER_CFG2_REG,
+ 0, YT8521_LTCR_EN_AUTOSEN);
+ if (err < 0)
+ return err;
+
+ err = ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_SW_RST, 0);
+ if (err < 0)
+ return err;
+
+ bmcr = __phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+	/* When coming from fiber forced mode, toggle bmcr power down and
+	 * power up so that aneg works correctly.
+ */
+ if (!(bmcr & BMCR_ANENABLE)) {
+ __phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
+ usleep_range(1000, 1100);
+ __phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
+ }
+
+ adv = linkmode_adv_to_mii_adv_x(phydev->advertising,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+
+ /* Setup fiber advertisement */
+ err = __phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_1000XHALF | ADVERTISE_1000XFULL |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM,
+ adv);
+ if (err < 0)
+ return err;
+
+ if (err > 0)
+ changed = 1;
+
+ return ytphy_check_and_restart_aneg(phydev, changed);
+}
+
+/**
+ * ytphy_setup_master_slave
+ * @phydev: target phy_device struct
+ *
+ * NOTE: The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative errno code
+ */
+static int ytphy_setup_master_slave(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ if (!phydev->is_gigabit_capable)
+ return 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ ctl |= CTL1000_PREFER_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ break;
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ fallthrough;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ ctl |= CTL1000_ENABLE_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return __phy_modify_changed(phydev, MII_CTRL1000,
+ (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER |
+ CTL1000_PREFER_MASTER), ctl);
+}
+
+/**
+ * ytphy_utp_config_advert - sanitize and advertise auto-negotiation parameters
+ * @phydev: target phy_device struct
+ *
+ * NOTE: Writes MII_ADVERTISE with the appropriate values,
+ * after sanitizing the values to make sure we only advertise
+ * what is supported. Returns < 0 on error, 0 if the PHY's advertisement
+ * hasn't changed, and > 0 if it has changed.
+ * The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 (unchanged), 1 (changed) or negative errno code
+ */
+static int ytphy_utp_config_advert(struct phy_device *phydev)
+{
+ int err, bmsr, changed = 0;
+ u32 adv;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
+
+ /* Setup standard advertisement */
+ err = __phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_100BASE4 |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
+ adv);
+ if (err < 0)
+ return err;
+ if (err > 0)
+ changed = 1;
+
+ bmsr = __phy_read(phydev, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all
+ * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
+ * logical 1.
+ */
+ if (!(bmsr & BMSR_ESTATEN))
+ return changed;
+
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+
+ err = __phy_modify_changed(phydev, MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ adv);
+ if (err < 0)
+ return err;
+ if (err > 0)
+ changed = 1;
+
+ return changed;
+}
+
+/**
+ * ytphy_utp_config_aneg - restart auto-negotiation or write BMCR
+ * @phydev: target phy_device struct
+ * @changed: whether autoneg is requested
+ *
+ * NOTE: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we write the BMCR.
+ * The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative errno code
+ */
+static int ytphy_utp_config_aneg(struct phy_device *phydev, bool changed)
+{
+ int err;
+ u16 ctl;
+
+ err = ytphy_setup_master_slave(phydev);
+ if (err < 0)
+ return err;
+ else if (err)
+ changed = true;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ /* configures/forces speed/duplex from @phydev */
+
+ ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+
+ return __phy_modify(phydev, MII_BMCR, ~(BMCR_LOOPBACK |
+ BMCR_ISOLATE | BMCR_PDOWN), ctl);
+ }
+
+ err = ytphy_utp_config_advert(phydev);
+ if (err < 0) /* error */
+ return err;
+ else if (err)
+ changed = true;
+
+ return ytphy_check_and_restart_aneg(phydev, changed);
+}
+
+/**
+ * yt8521_config_aneg_paged() - switch reg space then configure aneg
+ * for one page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to
+ * operate.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_config_aneg_paged(struct phy_device *phydev, int page)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fiber_supported);
+ struct yt8521_priv *priv = phydev->priv;
+ int old_page;
+ int ret = 0;
+
+ page &= YT8521_RSSR_SPACE_MASK;
+
+ old_page = phy_select_page(phydev, page);
+ if (old_page < 0)
+ goto err_restore_page;
+
+ /* If reg_page is YT8521_RSSR_TO_BE_ARBITRATED,
+ * phydev->advertising should be updated.
+ */
+ if (priv->reg_page == YT8521_RSSR_TO_BE_ARBITRATED) {
+ linkmode_zero(fiber_supported);
+ yt8521_prepare_fiber_features(phydev, fiber_supported);
+
+ /* prepare fiber_supported, then setup advertising. */
+ if (page == YT8521_RSSR_FIBER_SPACE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ fiber_supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ fiber_supported);
+ linkmode_and(phydev->advertising,
+ priv->combo_advertising, fiber_supported);
+ } else {
+ /* ETHTOOL_LINK_MODE_Autoneg_BIT is also used in utp */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ fiber_supported);
+ linkmode_andnot(phydev->advertising,
+ priv->combo_advertising,
+ fiber_supported);
+ }
+ }
+
+ if (page == YT8521_RSSR_FIBER_SPACE)
+ ret = yt8521_fiber_config_aneg(phydev);
+ else
+ ret = ytphy_utp_config_aneg(phydev, false);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8521_config_aneg() - change reg space then call yt8521_config_aneg_paged
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_config_aneg(struct phy_device *phydev)
+{
+ struct yt8521_priv *priv = phydev->priv;
+ int ret;
+
+ if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) {
+ ret = yt8521_config_aneg_paged(phydev, priv->reg_page);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* If reg_page is YT8521_RSSR_TO_BE_ARBITRATED,
+		 * phydev->advertising needs to be saved on the first run
+		 * because it contains the advertising supported by both the
+		 * mac and the yt8521 (utp and fiber).
+ */
+ if (linkmode_empty(priv->combo_advertising)) {
+ linkmode_copy(priv->combo_advertising,
+ phydev->advertising);
+ }
+
+ ret = yt8521_config_aneg_paged(phydev, YT8521_RSSR_UTP_SPACE);
+ if (ret < 0)
+ return ret;
+
+ ret = yt8521_config_aneg_paged(phydev, YT8521_RSSR_FIBER_SPACE);
+ if (ret < 0)
+ return ret;
+
+		/* we don't know which media will link up, so restore
+		 * phydev->advertising to the default value.
+ */
+ linkmode_copy(phydev->advertising, priv->combo_advertising);
+ }
+ return 0;
+}
+
+/**
+ * yt8521_aneg_done_paged() - determines the auto negotiation result of one
+ * page.
+ * @phydev: a pointer to a &struct phy_device
+ * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to
+ * operate.
+ *
+ * returns 0 (no link), 1 (fiber or utp link) or negative errno code
+ */
+static int yt8521_aneg_done_paged(struct phy_device *phydev, int page)
+{
+ int old_page;
+ int ret = 0;
+ int link;
+
+ old_page = phy_select_page(phydev, page & YT8521_RSSR_SPACE_MASK);
+ if (old_page < 0)
+ goto err_restore_page;
+
+ ret = __phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG);
+ if (ret < 0)
+ goto err_restore_page;
+
+ link = !!(ret & YTPHY_SSR_LINK);
+ ret = link;
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8521_aneg_done() - determines the auto negotiation result
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 (no link), 1 (fiber or utp link) or negative errno code
+ */
+static int yt8521_aneg_done(struct phy_device *phydev)
+{
+ struct yt8521_priv *priv = phydev->priv;
+ int link_fiber = 0;
+ int link_utp;
+ int link;
+
+ if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) {
+ link = yt8521_aneg_done_paged(phydev, priv->reg_page);
+ } else {
+ link_utp = yt8521_aneg_done_paged(phydev,
+ YT8521_RSSR_UTP_SPACE);
+ if (link_utp < 0)
+ return link_utp;
+
+ if (!link_utp) {
+ link_fiber = yt8521_aneg_done_paged(phydev,
+ YT8521_RSSR_FIBER_SPACE);
+ if (link_fiber < 0)
+ return link_fiber;
+ }
+ link = link_fiber || link_utp;
+ phydev_info(phydev, "%s, link_fiber: %d, link_utp: %d\n",
+ __func__, link_fiber, link_utp);
+ }
+
+ return link;
+}
+
+/**
+ * ytphy_utp_read_abilities - read PHY abilities from Clause 22 registers
+ * @phydev: target phy_device struct
+ *
+ * NOTE: Reads the PHY's abilities and populates
+ * phydev->supported accordingly.
+ * The caller must have taken the MDIO bus lock.
+ *
+ * returns 0 or negative errno code
+ */
+static int ytphy_utp_read_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ val = __phy_read(phydev, MII_BMSR);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported,
+ val & BMSR_ANEGCAPABLE);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported,
+ val & BMSR_100FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported,
+ val & BMSR_100HALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported,
+ val & BMSR_10FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported,
+ val & BMSR_10HALF);
+
+ if (val & BMSR_ESTATEN) {
+ val = __phy_read(phydev, MII_ESTATUS);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported, val & ESTATUS_1000_TFULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported, val & ESTATUS_1000_THALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported, val & ESTATUS_1000_XFULL);
+ }
+
+ return 0;
+}
+
+/**
+ * yt8521_get_features_paged() - read supported link modes for one page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to
+ * operate.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_get_features_paged(struct phy_device *phydev, int page)
+{
+ int old_page;
+ int ret = 0;
+
+ page &= YT8521_RSSR_SPACE_MASK;
+ old_page = phy_select_page(phydev, page);
+ if (old_page < 0)
+ goto err_restore_page;
+
+ if (page == YT8521_RSSR_FIBER_SPACE) {
+ linkmode_zero(phydev->supported);
+ yt8521_prepare_fiber_features(phydev, phydev->supported);
+ } else {
+ ret = ytphy_utp_read_abilities(phydev);
+ if (ret < 0)
+ goto err_restore_page;
+ }
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8521_get_features - switch reg space then call yt8521_get_features_paged
+ * @phydev: target phy_device struct
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_get_features(struct phy_device *phydev)
+{
+ struct yt8521_priv *priv = phydev->priv;
+ int ret;
+
+ if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) {
+ ret = yt8521_get_features_paged(phydev, priv->reg_page);
+ } else {
+ ret = yt8521_get_features_paged(phydev,
+ YT8521_RSSR_UTP_SPACE);
+ if (ret < 0)
+ return ret;
+
+ /* add fiber's features to phydev->supported */
+ yt8521_prepare_fiber_features(phydev, phydev->supported);
+ }
+ return ret;
+}
+
static struct phy_driver motorcomm_phy_drvs[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_YT8511),
@@ -121,16 +1757,53 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.read_page = yt8511_read_page,
.write_page = yt8511_write_page,
},
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8521),
+ .name = "YT8521 Gigabit Ethernet",
+ .get_features = yt8521_get_features,
+ .probe = yt8521_probe,
+ .read_page = yt8521_read_page,
+ .write_page = yt8521_write_page,
+ .get_wol = ytphy_get_wol,
+ .set_wol = ytphy_set_wol,
+ .config_aneg = yt8521_config_aneg,
+ .aneg_done = yt8521_aneg_done,
+ .config_init = yt8521_config_init,
+ .read_status = yt8521_read_status,
+ .soft_reset = yt8521_soft_reset,
+ .suspend = yt8521_suspend,
+ .resume = yt8521_resume,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S),
+ .name = "YT8531S Gigabit Ethernet",
+ .get_features = yt8521_get_features,
+ .probe = yt8531s_probe,
+ .read_page = yt8521_read_page,
+ .write_page = yt8521_write_page,
+ .get_wol = ytphy_get_wol,
+ .set_wol = ytphy_set_wol,
+ .config_aneg = yt8521_config_aneg,
+ .aneg_done = yt8521_aneg_done,
+ .config_init = yt8521_config_init,
+ .read_status = yt8521_read_status,
+ .soft_reset = yt8521_soft_reset,
+ .suspend = yt8521_suspend,
+ .resume = yt8521_resume,
+ },
};
module_phy_driver(motorcomm_phy_drvs);
-MODULE_DESCRIPTION("Motorcomm PHY driver");
+MODULE_DESCRIPTION("Motorcomm 8511/8521/8531S PHY driver");
MODULE_AUTHOR("Peter Geis");
+MODULE_AUTHOR("Frank");
MODULE_LICENSE("GPL");
static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8511) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
{ /* sentinel */ }
};
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index f81b077618f4..018253a573b8 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -501,8 +501,7 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
}
/* Derive the AES key to get a key for the hash authentication */
-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN],
- u16 key_len, u8 hkey[16])
+static int vsc8584_macsec_derive_key(const u8 *key, u16 key_len, u8 hkey[16])
{
const u8 input[AES_BLOCK_SIZE] = {0};
struct crypto_aes_ctx ctx;
@@ -518,7 +517,8 @@ static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN],
}
static int vsc8584_macsec_transformation(struct phy_device *phydev,
- struct macsec_flow *flow)
+ struct macsec_flow *flow,
+ const u8 *key)
{
struct vsc8531_private *priv = phydev->priv;
enum macsec_bank bank = flow->bank;
@@ -527,7 +527,7 @@ static int vsc8584_macsec_transformation(struct phy_device *phydev,
u8 hkey[16];
u64 sci;
- ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
+ ret = vsc8584_macsec_derive_key(key, priv->secy->key_len, hkey);
if (ret)
return ret;
@@ -563,7 +563,7 @@ static int vsc8584_macsec_transformation(struct phy_device *phydev,
for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
vsc8584_macsec_phy_write(phydev, bank,
MSCC_MS_XFORM_REC(index, rec++),
- ((u32 *)flow->key)[i]);
+ ((u32 *)key)[i]);
/* Set the authentication key */
for (i = 0; i < 4; i++)
@@ -632,28 +632,14 @@ static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
list_del(&flow->list);
clear_bit(flow->index, bitmap);
- memzero_explicit(flow->key, sizeof(flow->key));
kfree(flow);
}
-static int vsc8584_macsec_add_flow(struct phy_device *phydev,
- struct macsec_flow *flow, bool update)
+static void vsc8584_macsec_add_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
{
- int ret;
-
flow->port = MSCC_MS_PORT_CONTROLLED;
vsc8584_macsec_flow(phydev, flow);
-
- if (update)
- return 0;
-
- ret = vsc8584_macsec_transformation(phydev, flow);
- if (ret) {
- vsc8584_macsec_free_flow(phydev->priv, flow);
- return ret;
- }
-
- return 0;
}
static int vsc8584_macsec_default_flows(struct phy_device *phydev)
@@ -706,6 +692,7 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
{
struct phy_device *phydev = ctx->phydev;
struct vsc8531_private *priv = phydev->priv;
+ int ret;
flow->assoc_num = ctx->sa.assoc_num;
flow->rx_sa = ctx->sa.rx_sa;
@@ -717,19 +704,39 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
flow->match.untagged = 1;
- return vsc8584_macsec_add_flow(phydev, flow, update);
+ vsc8584_macsec_add_flow(phydev, flow);
+
+ if (update)
+ return 0;
+
+ ret = vsc8584_macsec_transformation(phydev, flow, ctx->sa.key);
+ if (ret)
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+
+ return ret;
}
static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
struct macsec_flow *flow, bool update)
{
+ int ret;
+
flow->assoc_num = ctx->sa.assoc_num;
flow->tx_sa = ctx->sa.tx_sa;
/* Always match untagged packets on egress */
flow->match.untagged = 1;
- return vsc8584_macsec_add_flow(ctx->phydev, flow, update);
+ vsc8584_macsec_add_flow(ctx->phydev, flow);
+
+ if (update)
+ return 0;
+
+ ret = vsc8584_macsec_transformation(ctx->phydev, flow, ctx->sa.key);
+ if (ret)
+ vsc8584_macsec_free_flow(ctx->phydev->priv, flow);
+
+ return ret;
}
static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
@@ -829,8 +836,6 @@ static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
if (IS_ERR(flow))
return PTR_ERR(flow);
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
-
ret = __vsc8584_macsec_add_rxsa(ctx, flow, false);
if (ret)
return ret;
@@ -882,8 +887,6 @@ static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
if (IS_ERR(flow))
return PTR_ERR(flow);
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
-
ret = __vsc8584_macsec_add_txsa(ctx, flow, false);
if (ret)
return ret;
diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
index 453304bae778..21ce3b892f7f 100644
--- a/drivers/net/phy/mscc/mscc_macsec.h
+++ b/drivers/net/phy/mscc/mscc_macsec.h
@@ -81,8 +81,6 @@ struct macsec_flow {
/* Highest takes precedence [0..15] */
u8 priority;
- u8 key[MACSEC_MAX_KEY_LEN];
-
union {
struct macsec_rx_sa *rx_sa;
struct macsec_tx_sa *tx_sa;
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index cae24091fb6f..147d7a5a9b35 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -30,6 +30,10 @@
#define PHY_ID_GPY241BM 0x67C9DE80
#define PHY_ID_GPY245B 0x67C9DEC0
+#define PHY_CTL1 0x13
+#define PHY_CTL1_MDICD BIT(3)
+#define PHY_CTL1_MDIAB BIT(2)
+#define PHY_CTL1_AMDIX BIT(0)
#define PHY_MIISTAT 0x18 /* MII state */
#define PHY_IMASK 0x19 /* interrupt mask */
#define PHY_ISTAT 0x1A /* interrupt status */
@@ -60,6 +64,13 @@
#define PHY_FWV_MAJOR_MASK GENMASK(11, 8)
#define PHY_FWV_MINOR_MASK GENMASK(7, 0)
+#define PHY_PMA_MGBT_POLARITY 0x82
+#define PHY_MDI_MDI_X_MASK GENMASK(1, 0)
+#define PHY_MDI_MDI_X_NORMAL 0x3
+#define PHY_MDI_MDI_X_AB 0x2
+#define PHY_MDI_MDI_X_CD 0x1
+#define PHY_MDI_MDI_X_CROSS 0x0
+
/* SGMII */
#define VSPEC1_SGMII_CTRL 0x08
#define VSPEC1_SGMII_CTRL_ANEN BIT(12) /* Aneg enable */
@@ -68,8 +79,8 @@
VSPEC1_SGMII_CTRL_ANRS)
/* Temperature sensor */
-#define VPSPEC1_TEMP_STA 0x0E
-#define VPSPEC1_TEMP_STA_DATA GENMASK(9, 0)
+#define VSPEC1_TEMP_STA 0x0E
+#define VSPEC1_TEMP_STA_DATA GENMASK(9, 0)
/* Mailbox */
#define VSPEC1_MBOX_DATA 0x5
@@ -144,14 +155,14 @@ static int gpy_hwmon_read(struct device *dev,
struct phy_device *phydev = dev_get_drvdata(dev);
int ret;
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VPSPEC1_TEMP_STA);
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_TEMP_STA);
if (ret < 0)
return ret;
if (!ret)
return -ENODATA;
*value = polynomial_calc(&poly_N_to_temp,
- FIELD_GET(VPSPEC1_TEMP_STA_DATA, ret));
+ FIELD_GET(VSPEC1_TEMP_STA_DATA, ret));
return 0;
}
@@ -351,6 +362,33 @@ static bool gpy_sgmii_aneg_en(struct phy_device *phydev)
return (ret & VSPEC1_SGMII_CTRL_ANEN) ? true : false;
}
+static int gpy_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ int ret;
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI_AUTO:
+ val = PHY_CTL1_AMDIX;
+ break;
+ case ETH_TP_MDI_X:
+ val = (PHY_CTL1_MDIAB | PHY_CTL1_MDICD);
+ break;
+ case ETH_TP_MDI:
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ ret = phy_modify(phydev, PHY_CTL1, PHY_CTL1_AMDIX | PHY_CTL1_MDIAB |
+ PHY_CTL1_MDICD, val);
+ if (ret < 0)
+ return ret;
+
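+	/* Restart aneg so that the new MDI/MDI-X setting takes effect */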
+ return genphy_c45_restart_aneg(phydev);
+}
+
static int gpy_config_aneg(struct phy_device *phydev)
{
bool changed = false;
@@ -366,6 +404,10 @@ static int gpy_config_aneg(struct phy_device *phydev)
: genphy_c45_pma_setup_forced(phydev);
}
+ ret = gpy_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+
ret = genphy_c45_an_config_aneg(phydev);
if (ret < 0)
return ret;
@@ -432,14 +474,42 @@ static int gpy_config_aneg(struct phy_device *phydev)
VSPEC1_SGMII_CTRL_ANRS, VSPEC1_SGMII_CTRL_ANRS);
}
-static void gpy_update_interface(struct phy_device *phydev)
+static int gpy_update_mdix(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, PHY_CTL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & PHY_CTL1_AMDIX)
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+	else if (ret & PHY_CTL1_MDICD || ret & PHY_CTL1_MDIAB)
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PHY_PMA_MGBT_POLARITY);
+ if (ret < 0)
+ return ret;
+
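+	/* Any resolved value below PHY_MDI_MDI_X_NORMAL means at least one
+	 * pair is crossed, so report MDI-X.
+	 */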
+ if ((ret & PHY_MDI_MDI_X_MASK) < PHY_MDI_MDI_X_NORMAL)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ return 0;
+}
+
+static int gpy_update_interface(struct phy_device *phydev)
{
int ret;
/* Interface mode is fixed for USXGMII and integrated PHY */
if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
- return;
+ return -EINVAL;
/* Automatically switch SERDES interface between SGMII and 2500-BaseX
* according to speed. Disable ANEG in 2500-BaseX mode.
@@ -449,10 +519,12 @@ static void gpy_update_interface(struct phy_device *phydev)
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
VSPEC1_SGMII_CTRL_ANEN, 0);
- if (ret < 0)
+ if (ret < 0) {
phydev_err(phydev,
"Error: Disable of SGMII ANEG failed: %d\n",
ret);
+ return ret;
+ }
break;
case SPEED_1000:
case SPEED_100:
@@ -466,15 +538,22 @@ static void gpy_update_interface(struct phy_device *phydev)
ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
VSPEC1_SGMII_ANEN_ANRS,
VSPEC1_SGMII_ANEN_ANRS);
- if (ret < 0)
+ if (ret < 0) {
phydev_err(phydev,
"Error: Enable of SGMII ANEG failed: %d\n",
ret);
+ return ret;
+ }
break;
}
- if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000)
- genphy_read_master_slave(phydev);
+ if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return gpy_update_mdix(phydev);
}
static int gpy_read_status(struct phy_device *phydev)
@@ -525,8 +604,11 @@ static int gpy_read_status(struct phy_device *phydev)
break;
}
- if (phydev->link)
- gpy_update_interface(phydev);
+ if (phydev->link) {
+ ret = gpy_update_interface(phydev);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 2c8bf438ea61..5d08c627a516 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -13,7 +13,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 93,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 99,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -49,6 +49,8 @@ const char *phy_speed_to_str(int speed)
return "200Gbps";
case SPEED_400000:
return "400Gbps";
+ case SPEED_800000:
+ return "800Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -157,6 +159,13 @@ EXPORT_SYMBOL_GPL(phy_interface_num_ports);
.bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
static const struct phy_setting settings[] = {
+ /* 800G */
+ PHY_SETTING( 800000, FULL, 800000baseCR8_Full ),
+ PHY_SETTING( 800000, FULL, 800000baseKR8_Full ),
+ PHY_SETTING( 800000, FULL, 800000baseDR8_Full ),
+ PHY_SETTING( 800000, FULL, 800000baseDR8_2_Full ),
+ PHY_SETTING( 800000, FULL, 800000baseSR8_Full ),
+ PHY_SETTING( 800000, FULL, 800000baseVR8_Full ),
/* 400G */
PHY_SETTING( 400000, FULL, 400000baseCR8_Full ),
PHY_SETTING( 400000, FULL, 400000baseKR8_Full ),
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e741d8aebffe..e5b6cb1a77f9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -67,6 +67,7 @@ static void phy_link_down(struct phy_device *phydev)
{
phydev->phy_link_change(phydev, false);
phy_led_trigger_change_speed(phydev);
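+	/* Bump the counter with WRITE_ONCE() as it may be read locklessly */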
+ WRITE_ONCE(phydev->link_down_events, phydev->link_down_events + 1);
}
static const char *phy_pause_str(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8cff61dbc4b5..716870a4499c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1512,6 +1512,15 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phy_resume(phydev);
phy_led_triggers_register(phydev);
+ /* If the external phy used by the current mac interface is managed
+ * by another mac interface, create a device link between the phy dev
+ * and the mac dev.
+ */
+ if (phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
+ phydev->devlink = device_link_add(dev->dev.parent, &phydev->mdio.dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+
return err;
error:
@@ -1750,6 +1759,9 @@ void phy_detach(struct phy_device *phydev)
struct module *ndev_owner = NULL;
struct mii_bus *bus;
+ if (phydev->devlink)
+ device_link_del(phydev->devlink);
+
if (phydev->sysfs_links) {
if (dev)
sysfs_remove_link(&dev->dev.kobj, "phydev");
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 2805b04d6402..09cc65c0da93 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -562,32 +562,48 @@ unsigned long phylink_get_capabilities(phy_interface_t interface,
EXPORT_SYMBOL_GPL(phylink_get_capabilities);
/**
- * phylink_generic_validate() - generic validate() callback implementation
- * @config: a pointer to a &struct phylink_config.
+ * phylink_validate_mask_caps() - Restrict link modes based on caps
* @supported: ethtool bitmask for supported link modes.
- * @state: a pointer to a &struct phylink_link_state.
+ * @state: pointer to a &struct phylink_link_state.
+ * @mac_capabilities: bitmask of MAC capabilities
*
- * Generic implementation of the validate() callback that MAC drivers can
- * use when they pass the range of supported interfaces and MAC capabilities.
- * This makes use of phylink_get_linkmodes().
+ * Calculate the supported link modes based on @mac_capabilities, and restrict
+ * @supported and @state based on that. Use this function if your capabilities
+ * aren't constant, such as if they vary depending on the interface.
*/
-void phylink_generic_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+void phylink_validate_mask_caps(unsigned long *supported,
+ struct phylink_link_state *state,
+ unsigned long mac_capabilities)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
unsigned long caps;
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
- caps = phylink_get_capabilities(state->interface,
- config->mac_capabilities,
+ caps = phylink_get_capabilities(state->interface, mac_capabilities,
state->rate_matching);
phylink_caps_to_linkmodes(mask, caps);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
}
+EXPORT_SYMBOL_GPL(phylink_validate_mask_caps);
+
+/**
+ * phylink_generic_validate() - generic validate() callback implementation
+ * @config: a pointer to a &struct phylink_config.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Generic implementation of the validate() callback that MAC drivers can
+ * use when they pass the range of supported interfaces and MAC capabilities.
+ */
+void phylink_generic_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ phylink_validate_mask_caps(supported, state, config->mac_capabilities);
+}
EXPORT_SYMBOL_GPL(phylink_generic_validate);
static int phylink_validate_mac_and_pcs(struct phylink *pl,
@@ -633,7 +649,10 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
}
/* Then validate the link parameters with the MAC */
- pl->mac_ops->validate(pl->config, supported, state);
+ if (pl->mac_ops->validate)
+ pl->mac_ops->validate(pl->config, supported, state);
+ else
+ phylink_generic_validate(pl->config, supported, state);
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
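For MAC drivers whose capabilities are not constant, the newly exported phylink_validate_mask_caps() can be called from a custom validate() implementation with an interface-dependent capability mask. A hedged sketch; the driver name and capability split are illustrative, not taken from this patch:

#include <linux/phylink.h>

static void foo_mac_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
			     MAC_10 | MAC_100 | MAC_1000FD;

	/* Assume only the 2500BASE-X interface carries 2.5G on this MAC */
	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
		caps |= MAC_2500FD;

	phylink_validate_mask_caps(supported, state, caps);
}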
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 40c9a64c5e30..83b99d95b278 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -608,6 +608,22 @@ static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
return sfp->write(sfp, a2, addr, buf, len);
}
+static int sfp_modify_u8(struct sfp *sfp, bool a2, u8 addr, u8 mask, u8 val)
+{
+ int ret;
+ u8 old, v;
+
+ ret = sfp_read(sfp, a2, addr, &old, sizeof(old));
+ if (ret != sizeof(old))
+ return ret;
+
+ v = (old & ~mask) | (val & mask);
+ if (v == old)
+ return sizeof(v);
+
+ return sfp_write(sfp, a2, addr, &v, sizeof(v));
+}
+
static unsigned int sfp_soft_get_state(struct sfp *sfp)
{
unsigned int state = 0;
@@ -633,17 +649,14 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp)
static void sfp_soft_set_state(struct sfp *sfp, unsigned int state)
{
- u8 status;
+ u8 mask = SFP_STATUS_TX_DISABLE_FORCE;
+ u8 val = 0;
- if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
- sizeof(status)) {
- if (state & SFP_F_TX_DISABLE)
- status |= SFP_STATUS_TX_DISABLE_FORCE;
- else
- status &= ~SFP_STATUS_TX_DISABLE_FORCE;
+ if (state & SFP_F_TX_DISABLE)
+ val |= SFP_STATUS_TX_DISABLE_FORCE;
- sfp_write(sfp, true, SFP_STATUS, &status, sizeof(status));
- }
+
+ sfp_modify_u8(sfp, true, SFP_STATUS, mask, val);
}
static void sfp_soft_start_poll(struct sfp *sfp)
@@ -1761,11 +1774,20 @@ static int sfp_module_parse_power(struct sfp *sfp)
u32 power_mW = 1000;
bool supports_a2;
- if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
+ if (sfp->id.ext.sff8472_compliance >= SFP_SFF8472_COMPLIANCE_REV10_2 &&
+ sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
power_mW = 1500;
- if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
+ /* Added in Rev 11.9, but there is no compliance code for this */
+ if (sfp->id.ext.sff8472_compliance >= SFP_SFF8472_COMPLIANCE_REV11_4 &&
+ sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
power_mW = 2000;
+ /* Power level 1 modules (max. 1W) are always supported. */
+ if (power_mW <= 1000) {
+ sfp->module_power_mW = power_mW;
+ return 0;
+ }
+
supports_a2 = sfp->id.ext.sff8472_compliance !=
SFP_SFF8472_COMPLIANCE_NONE ||
sfp->id.ext.diagmon & SFP_DIAGMON_DDM;
@@ -1789,12 +1811,6 @@ static int sfp_module_parse_power(struct sfp *sfp)
}
}
- if (power_mW <= 1000) {
- /* Modules below 1W do not require a power change sequence */
- sfp->module_power_mW = power_mW;
- return 0;
- }
-
if (!supports_a2) {
/* The module power level is below the host maximum and the
* module appears not to implement bus address 0xa2, so assume
@@ -1821,31 +1837,14 @@ static int sfp_module_parse_power(struct sfp *sfp)
static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
{
- u8 val;
int err;
- err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
- if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to read EEPROM: %pe\n", ERR_PTR(err));
- return -EAGAIN;
- }
-
- /* DM7052 reports as a high power module, responds to reads (with
- * all bytes 0xff) at 0x51 but does not accept writes. In any case,
- * if the bit is already set, we're already in high power mode.
- */
- if (!!(val & BIT(0)) == enable)
- return 0;
-
- if (enable)
- val |= BIT(0);
- else
- val &= ~BIT(0);
-
- err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
- if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to write EEPROM: %pe\n",
- ERR_PTR(err));
+ err = sfp_modify_u8(sfp, true, SFP_EXT_STATUS,
+ SFP_EXT_STATUS_PWRLVL_SELECT,
+ enable ? SFP_EXT_STATUS_PWRLVL_SELECT : 0);
+ if (err != sizeof(u8)) {
+ dev_err(sfp->dev, "failed to %sable high power: %pe\n",
+ enable ? "en" : "dis", ERR_PTR(err));
return -EAGAIN;
}
@@ -2643,10 +2642,46 @@ static void sfp_cleanup(void *data)
kfree(sfp);
}
+static int sfp_i2c_get(struct sfp *sfp)
+{
+ struct acpi_handle *acpi_handle;
+ struct fwnode_handle *h;
+ struct i2c_adapter *i2c;
+ struct device_node *np;
+ int err;
+
+ h = fwnode_find_reference(dev_fwnode(sfp->dev), "i2c-bus", 0);
+ if (IS_ERR(h)) {
+ dev_err(sfp->dev, "missing 'i2c-bus' property\n");
+ return -ENODEV;
+ }
+
+ if (is_acpi_device_node(h)) {
+ acpi_handle = ACPI_HANDLE_FWNODE(h);
+ i2c = i2c_acpi_find_adapter_by_handle(acpi_handle);
+ } else if ((np = to_of_node(h)) != NULL) {
+ i2c = of_find_i2c_adapter_by_node(np);
+ } else {
+ err = -EINVAL;
+ goto put;
+ }
+
+ if (!i2c) {
+ err = -EPROBE_DEFER;
+ goto put;
+ }
+
+ err = sfp_i2c_configure(sfp, i2c);
+ if (err)
+ i2c_put_adapter(i2c);
+put:
+ fwnode_handle_put(h);
+ return err;
+}
+
static int sfp_probe(struct platform_device *pdev)
{
const struct sff_data *sff;
- struct i2c_adapter *i2c;
char *sfp_irq_name;
struct sfp *sfp;
int err, i;
@@ -2664,51 +2699,20 @@ static int sfp_probe(struct platform_device *pdev)
sff = sfp->type = &sfp_data;
if (pdev->dev.of_node) {
- struct device_node *node = pdev->dev.of_node;
const struct of_device_id *id;
- struct device_node *np;
- id = of_match_node(sfp_of_match, node);
+ id = of_match_node(sfp_of_match, pdev->dev.of_node);
if (WARN_ON(!id))
return -EINVAL;
sff = sfp->type = id->data;
-
- np = of_parse_phandle(node, "i2c-bus", 0);
- if (!np) {
- dev_err(sfp->dev, "missing 'i2c-bus' property\n");
- return -ENODEV;
- }
-
- i2c = of_find_i2c_adapter_by_node(np);
- of_node_put(np);
- } else if (has_acpi_companion(&pdev->dev)) {
- struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
- struct fwnode_handle *fw = acpi_fwnode_handle(adev);
- struct fwnode_reference_args args;
- struct acpi_handle *acpi_handle;
- int ret;
-
- ret = acpi_node_get_property_reference(fw, "i2c-bus", 0, &args);
- if (ret || !is_acpi_device_node(args.fwnode)) {
- dev_err(&pdev->dev, "missing 'i2c-bus' property\n");
- return -ENODEV;
- }
-
- acpi_handle = ACPI_HANDLE_FWNODE(args.fwnode);
- i2c = i2c_acpi_find_adapter_by_handle(acpi_handle);
- } else {
+ } else if (!has_acpi_companion(&pdev->dev)) {
return -EINVAL;
}
- if (!i2c)
- return -EPROBE_DEFER;
-
- err = sfp_i2c_configure(sfp, i2c);
- if (err < 0) {
- i2c_put_adapter(i2c);
+ err = sfp_i2c_get(sfp);
+ if (err)
return err;
- }
for (i = 0; i < GPIO_MAX; i++)
if (sff->gpios & BIT(i)) {
@@ -2729,8 +2733,12 @@ static int sfp_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "maximum-power-milliwatt",
&sfp->max_power_mW);
- if (!sfp->max_power_mW)
+ if (sfp->max_power_mW < 1000) {
+ if (sfp->max_power_mW)
+ dev_warn(sfp->dev,
+ "Firmware bug: host maximum power should be at least 1W\n");
sfp->max_power_mW = 1000;
+ }
dev_info(sfp->dev, "Host maximum power %u.%uW\n",
sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9206c660a72e..d4c821c8cf57 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1743,6 +1743,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
int len;
unsigned char *cp;
+ skb->dev = ppp->dev;
+
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
/* check if we should pass this packet */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 62ade69295a9..d10606f257c4 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1865,13 +1865,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
for_each_possible_cpu(i) {
p = per_cpu_ptr(team->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
rx_packets = u64_stats_read(&p->rx_packets);
rx_bytes = u64_stats_read(&p->rx_bytes);
rx_multicast = u64_stats_read(&p->rx_multicast);
tx_packets = u64_stats_read(&p->tx_packets);
tx_bytes = u64_stats_read(&p->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index b095a4b4957b..18d99fda997c 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -466,9 +466,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
struct lb_stats tmp;
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
tmp.tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
acc_stats->tx_bytes += tmp.tx_bytes;
}
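The u64_stats_fetch_begin_irq()/_retry_irq() pairs above are converted to the plain variants; the reader-side retry loop itself is unchanged. A minimal sketch of the pattern, with illustrative struct and field names:

#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64_stats_t		tx_bytes;
	struct u64_stats_sync	syncp;
};

static u64 demo_read_tx_bytes(const struct demo_pcpu_stats *s)
{
	unsigned int start;
	u64 tx_bytes;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		tx_bytes = u64_stats_read(&s->tx_bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return tx_bytes;
}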
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 6312f67f260e..990484776f2d 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -58,10 +58,10 @@
* supported then @frame_id is filled, otherwise it stays %0.
*/
struct thunderbolt_ip_frame_header {
- u32 frame_size;
- u16 frame_index;
- u16 frame_id;
- u32 frame_count;
+ __le32 frame_size;
+ __le16 frame_index;
+ __le16 frame_id;
+ __le32 frame_count;
};
enum thunderbolt_ip_frame_pdf {
@@ -1052,7 +1052,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
*len = skb_frag_size(frag);
- return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
+ return kmap_local_page(skb_frag_page(frag)) + skb_frag_off(frag);
}
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
@@ -1110,7 +1110,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
dest += len;
if (unmap) {
- kunmap_atomic(src);
+ kunmap_local(src);
unmap = false;
}
@@ -1148,7 +1148,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
dest += len;
if (unmap) {
- kunmap_atomic(src);
+ kunmap_local(src);
unmap = false;
}
@@ -1163,7 +1163,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
memcpy(dest, src, data_len);
if (unmap)
- kunmap_atomic(src);
+ kunmap_local(src);
if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
goto err_drop;
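kmap_atomic()/kunmap_atomic() are replaced with kmap_local_page()/kunmap_local(); local mappings are CPU-local but do not disable preemption or pagefaults, and must be released in reverse order of mapping. A short illustrative sketch of the pattern (helper name is not from this patch):

#include <linux/highmem.h>
#include <linux/string.h>

static void demo_copy_from_page(struct page *page, size_t off,
				void *dst, size_t len)
{
	void *src = kmap_local_page(page);

	memcpy(dst, src + off, len);
	kunmap_local(src);	/* release the local mapping */
}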
@@ -1320,7 +1320,7 @@ static void tbnet_shutdown(struct tb_service *svc)
tbnet_tear_down(tb_service_get_drvdata(svc), true);
}
-static int __maybe_unused tbnet_suspend(struct device *dev)
+static int tbnet_suspend(struct device *dev)
{
struct tb_service *svc = tb_to_service(dev);
struct tbnet *net = tb_service_get_drvdata(svc);
@@ -1335,7 +1335,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused tbnet_resume(struct device *dev)
+static int tbnet_resume(struct device *dev)
{
struct tb_service *svc = tb_to_service(dev);
struct tbnet *net = tb_service_get_drvdata(svc);
@@ -1351,9 +1351,7 @@ static int __maybe_unused tbnet_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops tbnet_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(tbnet_pm_ops, tbnet_suspend, tbnet_resume);
static const struct tb_service_id tbnet_ids[] = {
{ TB_SERVICE("network", 1) },
@@ -1365,7 +1363,7 @@ static struct tb_service_driver tbnet_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "thunderbolt-net",
- .pm = &tbnet_pm_ops,
+ .pm = pm_sleep_ptr(&tbnet_pm_ops),
},
.probe = tbnet_probe,
.remove = tbnet_remove,
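DEFINE_SIMPLE_DEV_PM_OPS() together with pm_sleep_ptr() lets the suspend/resume callbacks and the ops table be discarded by the compiler when CONFIG_PM_SLEEP is disabled, without __maybe_unused annotations. An illustrative sketch for a hypothetical platform driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* quiesce the device */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo",
		.pm	= pm_sleep_ptr(&demo_pm_ops),
	},
};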
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 24001112c323..b4baa2001a63 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1748,7 +1748,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
- enum skb_drop_reason drop_reason;
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
@@ -1809,10 +1809,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
* skb was created with generic XDP routine.
*/
skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
- if (IS_ERR(skb)) {
- dev_core_stats_rx_dropped_inc(tun->dev);
- return PTR_ERR(skb);
- }
+ err = PTR_ERR_OR_ZERO(skb);
+ if (err)
+ goto drop;
if (!skb)
return total_len;
} else {
@@ -1837,13 +1836,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
noblock);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) != -EAGAIN)
- dev_core_stats_rx_dropped_inc(tun->dev);
- if (frags)
- mutex_unlock(&tfile->napi_mutex);
- return PTR_ERR(skb);
- }
+ err = PTR_ERR_OR_ZERO(skb);
+ if (err)
+ goto drop;
if (zerocopy)
err = zerocopy_sg_from_iter(skb, from);
@@ -1853,27 +1848,14 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (err) {
err = -EFAULT;
drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
-drop:
- dev_core_stats_rx_dropped_inc(tun->dev);
- kfree_skb_reason(skb, drop_reason);
- if (frags) {
- tfile->napi.skb = NULL;
- mutex_unlock(&tfile->napi_mutex);
- }
-
- return err;
+ goto drop;
}
}
if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
atomic_long_inc(&tun->rx_frame_errors);
- kfree_skb(skb);
- if (frags) {
- tfile->napi.skb = NULL;
- mutex_unlock(&tfile->napi_mutex);
- }
-
- return -EINVAL;
+ err = -EINVAL;
+ goto free_skb;
}
switch (tun->flags & TUN_TYPE_MASK) {
@@ -1889,9 +1871,8 @@ drop:
pi.proto = htons(ETH_P_IPV6);
break;
default:
- dev_core_stats_rx_dropped_inc(tun->dev);
- kfree_skb(skb);
- return -EINVAL;
+ err = -EINVAL;
+ goto drop;
}
}
@@ -1933,11 +1914,7 @@ drop:
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
- if (frags) {
- tfile->napi.skb = NULL;
- mutex_unlock(&tfile->napi_mutex);
- }
- return total_len;
+ goto unlock_frags;
}
}
rcu_read_unlock();
@@ -2017,6 +1994,22 @@ napi_busy:
tun_flow_update(tun, rxhash, tfile);
return total_len;
+
+drop:
+ if (err != -EAGAIN)
+ dev_core_stats_rx_dropped_inc(tun->dev);
+
+free_skb:
+ if (!IS_ERR_OR_NULL(skb))
+ kfree_skb_reason(skb, drop_reason);
+
+unlock_frags:
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
+
+ return err ?: total_len;
}
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
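The scattered tun_get_user() drop paths are folded into the drop/free_skb/unlock_frags labels, and PTR_ERR_OR_ZERO() replaces the open-coded IS_ERR()/PTR_ERR() checks. A minimal illustrative sketch of the idiom (not tun code):

#include <linux/err.h>
#include <linux/skbuff.h>

static int demo_consume(struct sk_buff *skb, bool bad_header)
{
	int err;

	/* 0 for a usable pointer, the encoded errno for an ERR_PTR() */
	err = PTR_ERR_OR_ZERO(skb);
	if (err)
		goto drop;

	if (bad_header) {
		err = -EINVAL;
		goto drop;
	}

	consume_skb(skb);
	return 0;

drop:
	/* skb may be an ERR_PTR() here, so guard the free */
	if (!IS_ERR_OR_NULL(skb))
		kfree_skb(skb);
	return err;
}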
@@ -3525,7 +3518,7 @@ static void tun_default_link_ksettings(struct net_device *dev,
{
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- cmd->base.speed = SPEED_10;
+ cmd->base.speed = SPEED_10000;
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_TP;
cmd->base.phy_address = 0;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 11f60d32be82..743cbf5d662c 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -700,7 +700,7 @@ static int ax88772_init_phy(struct usbnet *dev)
}
phy_suspend(priv->phydev);
- priv->phydev->mac_managed_pm = 1;
+ priv->phydev->mac_managed_pm = true;
phy_attached_info(priv->phydev);
@@ -720,7 +720,7 @@ static int ax88772_init_phy(struct usbnet *dev)
return -ENODEV;
}
- priv->phydev_int->mac_managed_pm = 1;
+ priv->phydev_int->mac_managed_pm = true;
phy_suspend(priv->phydev_int);
return 0;
@@ -773,7 +773,6 @@ static void ax88772_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
- .validate = phylink_generic_validate,
.mac_config = ax88772_mac_config,
.mac_link_down = ax88772_mac_link_down,
.mac_link_up = ax88772_mac_link_up,
@@ -1351,6 +1350,20 @@ static const struct driver_info ax88772b_info = {
.data = FLAG_EEPROM_MAC,
};
+static const struct driver_info lxausb_t1l_info = {
+ .description = "Linux Automation GmbH USB 10Base-T1L",
+ .bind = ax88772_bind,
+ .unbind = ax88772_unbind,
+ .status = asix_status,
+ .reset = ax88772_reset,
+ .stop = ax88772_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ .data = FLAG_EEPROM_MAC,
+};
+
static const struct driver_info ax88178_info = {
.description = "ASIX AX88178 USB 2.0 Ethernet",
.bind = ax88178_bind,
@@ -1539,6 +1552,10 @@ static const struct usb_device_id products [] = {
*/
USB_DEVICE(0x066b, 0x20f9),
.driver_info = (unsigned long) &hg20f9_info,
+}, {
+ // Linux Automation GmbH USB 10Base-T1L
+ USB_DEVICE(0x33f7, 0x0004),
+ .driver_info = (unsigned long) &lxausb_t1l_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index e11f70911acc..8911cd2ed534 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -990,6 +990,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* U-blox LARA-L6 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1343, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* Cinterion PLS8 modem by GEMALTO */
USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 0897fdb6254b..6ce8f4f0c70e 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -43,6 +43,7 @@
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/kstrtox.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/crc32.h>
@@ -318,7 +319,7 @@ static ssize_t ndp_to_end_store(struct device *d, struct device_attribute *attr
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
bool enable;
- if (strtobool(buf, &enable))
+ if (kstrtobool(buf, &enable))
return -EINVAL;
/* no change? */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 554d4e2a84a4..a808d718c012 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -13,6 +13,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
+#include <linux/kstrtox.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/usb.h>
@@ -343,7 +344,7 @@ static ssize_t raw_ip_store(struct device *d, struct device_attribute *attr, co
bool enable;
int ret;
- if (strtobool(buf, &enable))
+ if (kstrtobool(buf, &enable))
return -EINVAL;
/* no change? */
@@ -492,7 +493,7 @@ static ssize_t pass_through_store(struct device *d,
struct qmi_wwan_state *info;
bool enable;
- if (strtobool(buf, &enable))
+ if (kstrtobool(buf, &enable))
return -EINVAL;
info = (void *)&dev->data;
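strtobool() is a deprecated alias of kstrtobool(); both parse user-supplied boolean strings such as "1"/"0" or "y"/"n". A short illustrative sysfs store handler using the replacement (names are not from this driver):

#include <linux/device.h>
#include <linux/kstrtox.h>

static ssize_t demo_enable_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	bool enable;

	if (kstrtobool(buf, &enable))
		return -EINVAL;

	/* ... apply 'enable' to the device ... */
	return len;
}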
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 09682ea3354e..ac7c0653695f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -182,12 +182,12 @@ static void veth_get_ethtool_stats(struct net_device *dev,
size_t offset;
do {
- start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ start = u64_stats_fetch_begin(&rq_stats->syncp);
for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
offset = veth_rq_stats_desc[j].offset;
data[idx + j] = *(u64 *)(stats_base + offset);
}
- } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
idx += VETH_RQ_STATS_LEN;
}
@@ -203,12 +203,12 @@ static void veth_get_ethtool_stats(struct net_device *dev,
tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
do {
- start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ start = u64_stats_fetch_begin(&rq_stats->syncp);
for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
offset = veth_tq_stats_desc[j].offset;
data[tx_idx + j] += *(u64 *)(base + offset);
}
- } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
}
}
@@ -379,13 +379,13 @@ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
xdp_tx_err = stats->vs.xdp_tx_err;
packets = stats->vs.xdp_packets;
bytes = stats->vs.xdp_bytes;
drops = stats->vs.rx_drops;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
result->xdp_tx_err += xdp_tx_err;
result->xdp_packets += packets;
@@ -1773,7 +1773,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
veth_disable_gro(peer);
netif_carrier_off(peer);
- err = rtnl_configure_link(peer, ifmp);
+ err = rtnl_configure_link(peer, ifmp, 0, NULL);
if (err < 0)
goto err_configure_peer;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 86e52454b5b5..19eee0655b99 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2069,18 +2069,18 @@ static void virtnet_stats(struct net_device *dev,
struct send_queue *sq = &vi->sq[i];
do {
- start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+ start = u64_stats_fetch_begin(&sq->stats.syncp);
tpackets = sq->stats.packets;
tbytes = sq->stats.bytes;
terrors = sq->stats.tx_timeouts;
- } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+ start = u64_stats_fetch_begin(&rq->stats.syncp);
rpackets = rq->stats.packets;
rbytes = rq->stats.bytes;
rdrops = rq->stats.drops;
- } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
@@ -2691,12 +2691,12 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
stats_base = (u8 *)&rq->stats;
do {
- start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+ start = u64_stats_fetch_begin(&rq->stats.syncp);
for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
offset = virtnet_rq_stats_desc[j].offset;
data[idx + j] = *(u64 *)(stats_base + offset);
}
- } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
idx += VIRTNET_RQ_STATS_LEN;
}
@@ -2705,12 +2705,12 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
stats_base = (u8 *)&sq->stats;
do {
- start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+ start = u64_stats_fetch_begin(&sq->stats.syncp);
for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
offset = virtnet_sq_stats_desc[j].offset;
data[idx + j] = *(u64 *)(stats_base + offset);
}
- } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
idx += VIRTNET_SQ_STATS_LEN;
}
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index badf6f09ae51..6b5a4d036d15 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -159,13 +159,13 @@ static void vrf_get_stats64(struct net_device *dev,
dstats = per_cpu_ptr(dev->dstats, i);
do {
- start = u64_stats_fetch_begin_irq(&dstats->syncp);
+ start = u64_stats_fetch_begin(&dstats->syncp);
tbytes = dstats->tx_bytes;
tpkts = dstats->tx_pkts;
tdrops = dstats->tx_drps;
rbytes = dstats->rx_bytes;
rpkts = dstats->rx_pkts;
- } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
+ } while (u64_stats_fetch_retry(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;
stats->tx_dropped += tdrops;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 6ab669dcd1c6..92224b36787a 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -3794,7 +3794,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
goto errout;
}
- err = rtnl_configure_link(dev, NULL);
+ err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto unlink;
@@ -4416,7 +4416,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
return ERR_PTR(err);
}
- err = rtnl_configure_link(dev, NULL);
+ err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0) {
LIST_HEAD(list_kill);
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 3e04af4c5daa..a3de081cda5e 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -129,9 +129,9 @@ static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
pstats = per_cpu_ptr(vninode->stats, i);
do {
- start = u64_stats_fetch_begin_irq(&pstats->syncp);
+ start = u64_stats_fetch_begin(&pstats->syncp);
memcpy(&temp, &pstats->stats, sizeof(temp));
- } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
+ } while (u64_stats_fetch_retry(&pstats->syncp, start));
dest->rx_packets += temp.rx_packets;
dest->rx_bytes += temp.rx_bytes;
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 6bee16b207d1..2fceea9f6550 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1760,6 +1760,7 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
static const struct ieee80211_ops adm8211_ops = {
.tx = adm8211_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = adm8211_start,
.stop = adm8211_stop,
.add_interface = adm8211_add_interface,
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 6f937d2cc126..19f61225a708 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb)
}
}
+static void ar5523_cancel_tx_cmd(struct ar5523 *ar)
+{
+ usb_kill_urb(ar->tx_cmd.urb_tx);
+}
+
static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
int ilen, void *odata, int olen, int flags)
{
@@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
}
if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
+ ar5523_cancel_tx_cmd(ar);
cmd->odata = NULL;
ar5523_err(ar, "timeout waiting for command %02x reply\n",
code);
@@ -1355,6 +1361,7 @@ static const struct ieee80211_ops ar5523_ops = {
.start = ar5523_start,
.stop = ar5523_stop,
.tx = ar5523_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.set_rts_threshold = ar5523_set_rts_threshold,
.add_interface = ar5523_add_interface,
.remove_interface = ar5523_remove_interface,
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index ca007b800f75..e6ea884cafc1 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -44,6 +44,7 @@ config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_SMEM
select QCOM_SCM
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 400f332a7ff0..5eb131ab916f 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -99,6 +99,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -138,6 +139,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -178,6 +180,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -213,6 +216,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = true,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -252,6 +256,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -291,6 +296,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -330,6 +336,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -373,6 +380,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = true,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -418,6 +426,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -470,6 +479,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -519,6 +529,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -558,6 +569,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -599,6 +611,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -631,6 +644,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -677,6 +691,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -709,6 +724,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dynamic_sar_support = true,
.hw_restart_disconnect = true,
.use_fw_tx_credits = false,
+ .delay_unmap_buffer = true,
},
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index c861e66ef6bc..b9aea1510f7b 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -10,6 +10,7 @@
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
+#include <linux/kstrtox.h>
#include "core.h"
#include "debug.h"
@@ -1975,7 +1976,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
buf[buf_size] = '\0';
- if (strtobool(buf, &val) != 0)
+ if (kstrtobool(buf, &val) != 0)
return -EINVAL;
if (!ar->coex_support)
@@ -2113,7 +2114,7 @@ static ssize_t ath10k_write_peer_stats(struct file *file,
buf[buf_size] = '\0';
- if (strtobool(buf, &val) != 0)
+ if (kstrtobool(buf, &val) != 0)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 6d1784f74bea..5bfeecb95fca 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -56,6 +56,15 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
+ /* In a corner case the copy completion reaches the host while the
+ * copy engine is still processing the buffer; the host then unmaps
+ * the memory and triggers an SMMU fault. As a workaround, delay
+ * unmapping the memory to avoid these SMMU faults.
+ */
+ if (ar->hw_params.delay_unmap_buffer &&
+ ep->ul_pipe_id == 3)
+ mdelay(2);
+
hdr = (struct ath10k_htc_hdr *)skb->data;
ath10k_htc_restore_tx_skb(ep->htc, skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index f06cf39204e2..c051a22fce14 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1108,8 +1108,10 @@ struct htt_rx_in_ord_ind {
u8 reserved;
__le16 msdu_count;
union {
- struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
- struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
+ DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc,
+ msdu_descs32);
+ DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc_ext,
+ msdu_descs64);
} __packed;
} __packed;
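A flexible array member cannot be placed directly inside a union (or be a struct's only member), which is why the [0]-sized arrays were used; DECLARE_FLEX_ARRAY() wraps the member so a proper flexible array becomes legal there. An illustrative sketch with made-up types:

#include <linux/stddef.h>
#include <linux/types.h>

struct demo_in_ord_ind {
	__le16 msdu_count;
	union {
		/* exactly one of the two descriptor layouts follows */
		DECLARE_FLEX_ARRAY(__le32, descs32);
		DECLARE_FLEX_ARRAY(__le64, descs64);
	} __packed;
} __packed;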
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e76aab973320..438b0caaceb7 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1379,7 +1379,7 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_get_tid(hdr, tid, sizeof(tid)),
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
- (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
@@ -1844,15 +1844,14 @@ static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
- u16 offset,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
- hdr = (struct ieee80211_hdr *)(skb->data + offset);
- ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
pn = ehdr[0];
@@ -1866,19 +1865,17 @@ static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
}
static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
- struct sk_buff *skb,
- u16 offset)
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ hdr = (struct ieee80211_hdr *)skb->data;
return !is_multicast_ether_addr(hdr->addr1);
}
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
struct sk_buff *skb,
u16 peer_id,
- u16 offset,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_peer *peer;
@@ -1893,16 +1890,16 @@ static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
return false;
}
- hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr);
else
tid = ATH10K_TXRX_NON_QOS_TID;
last_pn = &peer->frag_tids_last_pn[tid];
- new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
+ new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
- seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
if (frag_number == 0) {
last_pn->pn48 = new_pn.pn48;
@@ -2059,13 +2056,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
msdu,
peer_id,
- 0,
enctype);
if (frag)
multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
- msdu,
- 0);
+ msdu);
if (!frag_pn_check || !multicast_check) {
/* Discard the fragment with invalid PN or multicast DA
@@ -2824,7 +2819,7 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
hdr_space = ieee80211_hdrlen(hdr->frame_control);
sc = __le16_to_cpu(hdr->seq_ctrl);
- seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
+ seq = IEEE80211_SEQ_TO_SN(sc);
frag = sc & IEEE80211_SCTL_FRAG;
sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 1b99f3a39a11..9643031a4427 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -637,6 +637,8 @@ struct ath10k_hw_params {
bool hw_restart_disconnect;
bool use_fw_tx_credits;
+
+ bool delay_unmap_buffer;
};
struct htt_resp;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e56c6a6b1379..728d607289c3 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3792,18 +3792,22 @@ static struct pci_driver ath10k_pci_driver = {
static int __init ath10k_pci_init(void)
{
- int ret;
+ int ret1, ret2;
- ret = pci_register_driver(&ath10k_pci_driver);
- if (ret)
+ ret1 = pci_register_driver(&ath10k_pci_driver);
+ if (ret1)
printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
- ret);
+ ret1);
- ret = ath10k_ahb_init();
- if (ret)
- printk(KERN_ERR "ahb init failed: %d\n", ret);
+ ret2 = ath10k_ahb_init();
+ if (ret2)
+ printk(KERN_ERR "ahb init failed: %d\n", ret2);
- return ret;
+ if (ret1 && ret2)
+ return ret1;
+
+ /* registered to at least one bus */
+ return 0;
}
module_init(ath10k_pci_init);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 66cb7a1e628a..3f94fbf83702 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -14,6 +14,7 @@
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
#include <linux/string.h>
#include <net/sock.h>
@@ -22,6 +23,10 @@
#define ATH10K_QMI_CLIENT_ID 0x4b4e454c
#define ATH10K_QMI_TIMEOUT 30
+#define SMEM_IMAGE_VERSION_TABLE 469
+#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
+#define SMEM_IMAGE_VERSION_ENTRY_SIZE 128
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
struct ath10k_msa_mem_info *mem_info)
@@ -536,6 +541,33 @@ int ath10k_qmi_wlan_disable(struct ath10k *ar)
return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
}
+static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id)
+{
+ u8 *table_ptr;
+ size_t smem_item_size;
+ const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX *
+ SMEM_IMAGE_VERSION_ENTRY_SIZE;
+
+ table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+ SMEM_IMAGE_VERSION_TABLE,
+ &smem_item_size);
+
+ if (IS_ERR(table_ptr)) {
+ ath10k_err(ar, "smem image version table not found\n");
+ return;
+ }
+
+ if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE >
+ smem_item_size) {
+ ath10k_err(ar, "smem block size too small: %zu\n",
+ smem_item_size);
+ return;
+ }
+
+ strscpy(table_ptr + smem_img_idx_wlan, fw_build_id,
+ SMEM_IMAGE_VERSION_NAME_SIZE);
+}
+
static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
{
struct wlfw_cap_resp_msg_v01 *resp;
@@ -606,6 +638,9 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
}
+ if (resp->fw_build_id_valid)
+ ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id);
+
kfree(resp);
return 0;
@@ -618,7 +653,7 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
struct wlfw_host_cap_resp_msg_v01 resp = {};
struct wlfw_host_cap_req_msg_v01 req = {};
- struct qmi_elem_info *req_ei;
+ const struct qmi_elem_info *req_ei;
struct ath10k *ar = qmi->ar;
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
index 86fcf4e1de5f..1c81e454f943 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include "qmi_wlfw_v01.h"
-static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -56,7 +56,7 @@ static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -87,7 +87,7 @@ static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@@ -109,7 +109,7 @@ static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -122,7 +122,7 @@ static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -153,7 +153,7 @@ static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -184,7 +184,7 @@ static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -225,7 +225,7 @@ static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -256,7 +256,7 @@ static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -278,7 +278,7 @@ static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -291,7 +291,7 @@ static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -304,7 +304,7 @@ static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
{}
};
-static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -326,7 +326,7 @@ static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -528,7 +528,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -560,15 +560,15 @@ struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -626,7 +626,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@@ -657,7 +657,7 @@ struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -671,7 +671,7 @@ struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -805,7 +805,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -819,11 +819,11 @@ struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -949,7 +949,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -1079,7 +1079,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1093,7 +1093,7 @@ struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -1133,7 +1133,7 @@ struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1147,7 +1147,7 @@ struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@@ -1160,7 +1160,7 @@ struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -1272,7 +1272,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1286,7 +1286,7 @@ struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@@ -1308,7 +1308,7 @@ struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@@ -1330,7 +1330,7 @@ struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1443,7 +1443,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1465,7 +1465,7 @@ struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1498,11 +1498,11 @@ struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1516,7 +1516,7 @@ struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1538,7 +1538,7 @@ struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1552,7 +1552,7 @@ struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1583,7 +1583,7 @@ struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1624,7 +1624,7 @@ struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1664,7 +1664,7 @@ struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1678,7 +1678,7 @@ struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1691,7 +1691,7 @@ struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1705,7 +1705,7 @@ struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1727,7 +1727,7 @@ struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1741,7 +1741,7 @@ struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1988,7 +1988,7 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -2010,7 +2010,7 @@ struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -2024,7 +2024,7 @@ struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -2047,7 +2047,7 @@ struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -2070,7 +2070,7 @@ struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -2084,15 +2084,15 @@ struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -2168,11 +2168,11 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -2186,7 +2186,7 @@ struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -2208,7 +2208,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -2258,7 +2258,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -2280,7 +2280,7 @@ struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -2294,7 +2294,7 @@ struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
{}
};
-struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
index 4d107e1364a8..f0db991408dc 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -215,7 +215,7 @@ struct wlfw_ind_register_req_msg_v01 {
};
#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50
-extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
struct wlfw_ind_register_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@@ -224,21 +224,21 @@ struct wlfw_ind_register_resp_msg_v01 {
};
#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
-extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
struct wlfw_fw_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
-extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
struct wlfw_msa_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
-extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
struct wlfw_pin_connect_result_ind_msg_v01 {
u8 pwr_pin_result_valid;
@@ -250,7 +250,7 @@ struct wlfw_pin_connect_result_ind_msg_v01 {
};
#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
-extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
struct wlfw_wlan_mode_req_msg_v01 {
enum wlfw_driver_mode_enum_v01 mode;
@@ -259,14 +259,14 @@ struct wlfw_wlan_mode_req_msg_v01 {
};
#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
-extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
struct wlfw_wlan_mode_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
struct wlfw_wlan_cfg_req_msg_v01 {
u8 host_version_valid;
@@ -286,21 +286,21 @@ struct wlfw_wlan_cfg_req_msg_v01 {
};
#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
-extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
struct wlfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
struct wlfw_cap_req_msg_v01 {
char placeholder;
};
#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
-extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
struct wlfw_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@@ -319,7 +319,7 @@ struct wlfw_cap_resp_msg_v01 {
};
#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
-extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
struct wlfw_bdf_download_req_msg_v01 {
u8 valid;
@@ -339,14 +339,14 @@ struct wlfw_bdf_download_req_msg_v01 {
};
#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
-extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
struct wlfw_bdf_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
struct wlfw_cal_report_req_msg_v01 {
u32 meta_data_len;
@@ -356,21 +356,21 @@ struct wlfw_cal_report_req_msg_v01 {
};
#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
-extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
struct wlfw_cal_report_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
struct wlfw_initiate_cal_download_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
};
#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
struct wlfw_cal_download_req_msg_v01 {
u8 valid;
@@ -388,14 +388,14 @@ struct wlfw_cal_download_req_msg_v01 {
};
#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
-extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
struct wlfw_cal_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
struct wlfw_initiate_cal_update_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
@@ -403,7 +403,7 @@ struct wlfw_initiate_cal_update_ind_msg_v01 {
};
#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
-extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
struct wlfw_cal_update_req_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
@@ -411,7 +411,7 @@ struct wlfw_cal_update_req_msg_v01 {
};
#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
-extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
struct wlfw_cal_update_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@@ -429,7 +429,7 @@ struct wlfw_cal_update_resp_msg_v01 {
};
#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
-extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
struct wlfw_msa_info_req_msg_v01 {
u64 msa_addr;
@@ -437,7 +437,7 @@ struct wlfw_msa_info_req_msg_v01 {
};
#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
-extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
struct wlfw_msa_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@@ -446,21 +446,21 @@ struct wlfw_msa_info_resp_msg_v01 {
};
#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
-extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
struct wlfw_msa_ready_req_msg_v01 {
char placeholder;
};
#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
-extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
struct wlfw_msa_ready_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
struct wlfw_ini_req_msg_v01 {
u8 enablefwlog_valid;
@@ -468,14 +468,14 @@ struct wlfw_ini_req_msg_v01 {
};
#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
-extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
struct wlfw_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
struct wlfw_athdiag_read_req_msg_v01 {
u32 offset;
@@ -484,7 +484,7 @@ struct wlfw_athdiag_read_req_msg_v01 {
};
#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
-extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
struct wlfw_athdiag_read_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@@ -494,7 +494,7 @@ struct wlfw_athdiag_read_resp_msg_v01 {
};
#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
-extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
struct wlfw_athdiag_write_req_msg_v01 {
u32 offset;
@@ -504,28 +504,28 @@ struct wlfw_athdiag_write_req_msg_v01 {
};
#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
-extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
struct wlfw_athdiag_write_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
struct wlfw_vbatt_req_msg_v01 {
u64 voltage_uv;
};
#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
-extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
struct wlfw_vbatt_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
struct wlfw_mac_addr_req_msg_v01 {
u8 mac_addr_valid;
@@ -533,14 +533,14 @@ struct wlfw_mac_addr_req_msg_v01 {
};
#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
-extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
struct wlfw_mac_addr_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
#define QMI_WLFW_MAX_NUM_GPIO_V01 32
struct wlfw_host_cap_req_msg_v01 {
@@ -574,15 +574,15 @@ struct wlfw_host_cap_req_msg_v01 {
};
#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
-extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
-extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
struct wlfw_request_mem_ind_msg_v01 {
u32 mem_seg_len;
@@ -590,7 +590,7 @@ struct wlfw_request_mem_ind_msg_v01 {
};
#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
-extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
struct wlfw_respond_mem_req_msg_v01 {
u32 mem_seg_len;
@@ -598,28 +598,28 @@ struct wlfw_respond_mem_req_msg_v01 {
};
#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
-extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
struct wlfw_respond_mem_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
struct wlfw_mem_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
-extern struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
struct wlfw_fw_init_done_ind_msg_v01 {
char placeholder;
};
#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
-extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
struct wlfw_rejuvenate_ind_msg_v01 {
u8 cause_for_rejuvenation_valid;
@@ -633,21 +633,21 @@ struct wlfw_rejuvenate_ind_msg_v01 {
};
#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
-extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
struct wlfw_rejuvenate_ack_req_msg_v01 {
char placeholder;
};
#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
-extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
struct wlfw_rejuvenate_ack_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
struct wlfw_dynamic_feature_mask_req_msg_v01 {
u8 mask_valid;
@@ -655,7 +655,7 @@ struct wlfw_dynamic_feature_mask_req_msg_v01 {
};
#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
-extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
struct wlfw_dynamic_feature_mask_resp_msg_v01 {
struct qmi_response_type_v01 resp;
@@ -666,7 +666,7 @@ struct wlfw_dynamic_feature_mask_resp_msg_v01 {
};
#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
-extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
struct wlfw_m3_info_req_msg_v01 {
u64 addr;
@@ -674,20 +674,20 @@ struct wlfw_m3_info_req_msg_v01 {
};
#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
-extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
struct wlfw_m3_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
-extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
struct wlfw_xo_cal_ind_msg_v01 {
u8 xo_cal_data;
};
#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
-extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 876410a47d1d..6b6aa3c36744 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -584,7 +584,14 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "did not find station from tdls peer event");
goto exit;
}
+
arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ ath10k_warn(ar, "no vif for vdev_id %d found",
+ __le32_to_cpu(ev->vdev_id));
+ goto exit;
+ }
+
ieee80211_tdls_oper_request(
arvif->vif, station->addr,
NL80211_TDLS_TEARDOWN,
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b99180bc8172..edf78df9b12f 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -195,6 +195,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
},
{
.name = "qca6390 hw2.0",
@@ -277,6 +278,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
},
{
.name = "qcn9074 hw1.0",
@@ -356,6 +358,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
},
{
.name = "wcn6855 hw2.0",
@@ -438,6 +441,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
},
{
.name = "wcn6855 hw2.1",
@@ -519,6 +523,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
},
{
.name = "wcn6750 hw1.0",
@@ -597,6 +602,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = false,
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
+ .support_fw_mac_sequence = true,
},
};
@@ -1641,7 +1647,7 @@ static void ath11k_update_11d(struct work_struct *work)
}
}
-static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
+void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
@@ -1677,6 +1683,10 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
ath11k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
wake_up(&ar->txmgmt_empty_waitq);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -1730,9 +1740,6 @@ static void ath11k_core_restart(struct work_struct *work)
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
int ret;
- if (!ab->is_reset)
- ath11k_core_pre_reconfigure_recovery(ab);
-
ret = ath11k_core_reconfigure_on_crash(ab);
if (ret) {
ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index cf2f52cc4e30..22460b0abf03 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -505,6 +505,8 @@ struct ath11k_sta {
u64 ps_start_jiffies;
u64 ps_total_duration;
bool peer_current_ps_valid;
+
+ u32 bw_prev;
};
#define ATH11K_MIN_5G_FREQ 4150
@@ -1157,6 +1159,7 @@ int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
+void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
const char *filename);
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 8a3f24862edc..0c5ef8a526d8 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -219,6 +219,7 @@ struct ath11k_hw_params {
bool tcl_ring_retry;
u32 tx_ring_size;
bool smp2p_wow_exit;
+ bool support_fw_mac_sequence;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 2d1e3fd9b526..9e923ecb0891 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -241,7 +241,10 @@ const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
#define ath11k_a_rates (ath11k_legacy_rates + 4)
#define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4)
-#define ATH11K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */
+#define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD 200 /* in msecs */
+
+/* Overhead due to the processing of channel switch events from FW */
+#define ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* in msecs */
static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
@@ -3612,6 +3615,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct scan_req_params arg;
int ret = 0;
int i;
+ u32 scan_timeout;
mutex_lock(&ar->conf_mutex);
@@ -3681,6 +3685,26 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
}
+ /* if duration is set, default dwell times will be overwritten */
+ if (req->duration) {
+ arg.dwell_time_active = req->duration;
+ arg.dwell_time_active_2g = req->duration;
+ arg.dwell_time_active_6g = req->duration;
+ arg.dwell_time_passive = req->duration;
+ arg.dwell_time_passive_6g = req->duration;
+ arg.burst_duration = req->duration;
+
+ scan_timeout = min_t(u32, arg.max_rest_time *
+ (arg.num_chan - 1) + (req->duration +
+ ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
+ arg.num_chan, arg.max_scan_time);
+ } else {
+ scan_timeout = arg.max_scan_time;
+ }
+
+ /* Add a margin to account for event/command processing */
+ scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;
+
ret = ath11k_start_scan(ar, &arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
@@ -3689,10 +3713,8 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
- /* Add a 200ms margin to account for event/command processing */
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
- msecs_to_jiffies(arg.max_scan_time +
- ATH11K_MAC_SCAN_TIMEOUT_MSECS));
+ msecs_to_jiffies(scan_timeout));
exit:
kfree(arg.chan_list);
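A note on the hunk above: the fixed 200 ms scan timeout is replaced by one derived from the requested dwell duration, the per-channel rest time and the channel count, capped by the firmware's maximum scan time and padded with a command/event margin. A rough, standalone sketch of that arithmetic (illustrative values only, not driver code):

/* Illustrative sketch only: worst-case scan timeout as computed above,
 * with made-up example values for rest time, dwell duration, channel
 * count and the firmware scan-time cap.
 */
#include <stdio.h>

#define SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD_MS 10	/* per-channel FW event overhead */
#define SCAN_CMD_EVT_OVERHEAD_MS 200			/* command/event processing margin */

static unsigned int scan_timeout_ms(unsigned int max_rest_time,
				    unsigned int duration,
				    unsigned int num_chan,
				    unsigned int max_scan_time)
{
	/* rest periods between channels plus dwell time on every channel */
	unsigned int t = max_rest_time * (num_chan - 1) +
			 (duration + SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD_MS) * num_chan;

	if (t > max_scan_time)
		t = max_scan_time;	/* never exceed the firmware limit */

	return t + SCAN_CMD_EVT_OVERHEAD_MS;
}

int main(void)
{
	/* e.g. 3 channels, 100 ms dwell, 50 ms rest, 40000 ms firmware cap:
	 * 50 * 2 + (100 + 10) * 3 = 430, capped at 40000, plus 200 => 630 ms
	 */
	printf("%u ms\n", scan_timeout_ms(50, 100, 3, 40000));
	return 0;
}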
@@ -4215,10 +4237,11 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
const u16 *he_mcs_mask;
- u32 changed, bw, nss, smps;
+ u32 changed, bw, nss, smps, bw_prev;
int err, num_vht_rates, num_he_rates;
const struct cfg80211_bitrate_mask *mask;
struct peer_assoc_params peer_arg;
+ enum wmi_phy_mode peer_phymode;
arsta = container_of(wk, struct ath11k_sta, update_wk);
sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
@@ -4239,6 +4262,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
arsta->changed = 0;
bw = arsta->bw;
+ bw_prev = arsta->bw_prev;
nss = arsta->nss;
smps = arsta->smps;
@@ -4252,26 +4276,57 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
ath11k_mac_max_he_nss(he_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
- /* Send peer assoc command before set peer bandwidth param to
- * avoid the mismatch between the peer phymode and the peer
- * bandwidth.
- */
- ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true);
-
- peer_arg.is_assoc = false;
- err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
- if (err) {
- ath11k_warn(ar->ab, "failed to send peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, err);
- } else if (wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ /* Get the peer phymode */
+ ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
+ peer_phymode = peer_arg.peer_phymode;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+ sta->addr, bw, peer_phymode);
+
+ if (bw > bw_prev) {
+ /* BW is upgraded. In this case we send WMI_PEER_PHYMODE
+ * followed by WMI_PEER_CHWIDTH
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac BW upgrade for sta %pM new BW %d, old BW %d\n",
+ sta->addr, bw, bw_prev);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_PHYMODE, peer_phymode);
+
+ if (err) {
+ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, peer_phymode, err);
+ goto err_rc_bw_changed;
+ }
+
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_CHWIDTH, bw);
+
if (err)
ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
} else {
- ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ /* BW is downgraded. In this case we send WMI_PEER_CHWIDTH
+ * followed by WMI_PEER_PHYMODE
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac BW downgrade for sta %pM new BW %d, old BW %d\n",
+ sta->addr, bw, bw_prev);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+
+ if (err) {
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ goto err_rc_bw_changed;
+ }
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_PHYMODE, peer_phymode);
+
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, peer_phymode, err);
}
}
@@ -4352,6 +4407,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
}
}
+err_rc_bw_changed:
mutex_unlock(&ar->conf_mutex);
}
@@ -4505,6 +4561,34 @@ exit:
return ret;
}
+static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ u32 bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n",
+ sta->deflink.bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ return bw;
+}
+
static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4590,6 +4674,12 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
sta->addr);
+
+ spin_lock_bh(&ar->data_lock);
+ /* Set arsta bw and prev bw */
+ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ spin_unlock_bh(&ar->data_lock);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
spin_lock_bh(&ar->ab->base_lock);
@@ -4713,28 +4803,8 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
spin_lock_bh(&ar->data_lock);
if (changed & IEEE80211_RC_BW_CHANGED) {
- bw = WMI_PEER_CHWIDTH_20MHZ;
-
- switch (sta->deflink.bandwidth) {
- case IEEE80211_STA_RX_BW_20:
- bw = WMI_PEER_CHWIDTH_20MHZ;
- break;
- case IEEE80211_STA_RX_BW_40:
- bw = WMI_PEER_CHWIDTH_40MHZ;
- break;
- case IEEE80211_STA_RX_BW_80:
- bw = WMI_PEER_CHWIDTH_80MHZ;
- break;
- case IEEE80211_STA_RX_BW_160:
- bw = WMI_PEER_CHWIDTH_160MHZ;
- break;
- default:
- ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
- sta->deflink.bandwidth, sta->addr);
- bw = WMI_PEER_CHWIDTH_20MHZ;
- break;
- }
-
+ bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
arsta->bw = bw;
}
@@ -6163,6 +6233,40 @@ void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab)
}
}
+static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif)
+{
+ unsigned long time_left;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ return -ETIMEDOUT;
+ }
+
+ ar->ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ar->num_created_vdevs--;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+
+ return ret;
+}
+
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -6373,18 +6477,16 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
+ ath11k_debugfs_add_interface(arvif);
+
if (vif->type != NL80211_IFTYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_vdev_create(ar);
- if (ret) {
+ if (ret)
ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
ret);
- goto err_peer_del;
- }
}
- ath11k_debugfs_add_interface(arvif);
-
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -6400,16 +6502,12 @@ err_peer_del:
}
err_vdev_del:
- ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
- ar->num_created_vdevs--;
- ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
- ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ ath11k_mac_vdev_delete(ar, arvif);
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
err:
- ath11k_debugfs_remove_interface(arvif);
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -6432,7 +6530,6 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_base *ab = ar->ab;
- unsigned long time_left;
int ret;
int i;
@@ -6453,29 +6550,13 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
- reinit_completion(&ar->vdev_delete_done);
-
- ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ret = ath11k_mac_vdev_delete(ar, arvif);
if (ret) {
- ath11k_warn(ab, "failed to delete WMI vdev %d: %d\n",
+ ath11k_warn(ab, "failed to delete vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_vdev_del;
}
- time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
- ATH11K_VDEV_DELETE_TIMEOUT_HZ);
- if (time_left == 0) {
- ath11k_warn(ab, "Timeout in receiving vdev delete response\n");
- goto err_vdev_del;
- }
-
- ab->free_vdev_map |= 1LL << (arvif->vdev_id);
- ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
- ar->num_created_vdevs--;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
- vif->addr, arvif->vdev_id);
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->monitor_vdev_id = -1;
@@ -7929,6 +8010,7 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
int recovery_count;
+ struct ath11k_vif *arvif;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
@@ -7964,6 +8046,12 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
}
}
+ if (ar->ab->hw_params.support_fw_mac_sequence) {
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ieee80211_hw_restart_disconnect(arvif->vif);
+ }
+ }
}
mutex_unlock(&ar->conf_mutex);
@@ -8539,6 +8627,7 @@ err_fallback:
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath11k_mac_op_start,
.stop = ath11k_mac_op_stop,
.reconfig_complete = ath11k_mac_op_reconfig_complete,
@@ -9014,6 +9103,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
}
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
ath11k_reg_init(ar);
if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 2a0d3afb0c99..0231783ad754 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -163,7 +163,7 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
-u32 ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
+enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 380f9d37b644..30d66147223f 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -218,9 +218,16 @@ int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
if (wakeup_required && ab->pci.ops->wakeup) {
ret = ab->pci.ops->wakeup(ab);
if (ret) {
- ath11k_warn(ab, "failed to wakeup for read from 0x%x: %d\n",
- start, ret);
- return ret;
+ ath11k_warn(ab,
+ "wakeup failed, data may be invalid: %d",
+ ret);
+ /* Even though wakeup() failed, continue processing rather
+ * than returning, because some parts of the data may still
+ * be valid and useful in some cases, e.g. they could give us
+ * some clues about a firmware crash.
+ * Being misled by invalid data can be avoided because we
+ * are aware of the wakeup failure.
+ */
}
}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 51de2208b789..ab923e24b0a9 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -19,6 +19,7 @@
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
+#define PLATFORM_CAP_PCIE_PME_D3COLD 0x10
#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
@@ -28,7 +29,7 @@ module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644);
MODULE_PARM_DESC(cold_boot_cal,
"Decrease the channel switch time but increase the driver load time (Default: true)");
-static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -279,7 +280,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -296,7 +297,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -521,7 +522,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -557,7 +558,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -589,7 +590,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -631,7 +632,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -658,7 +659,7 @@ static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -698,7 +699,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -725,7 +726,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -743,7 +744,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@@ -751,7 +752,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@@ -759,7 +760,7 @@ static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -813,7 +814,7 @@ static struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -839,7 +840,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -856,7 +857,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -872,7 +873,7 @@ static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -898,7 +899,7 @@ static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1099,7 +1100,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -1234,7 +1235,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1252,7 +1253,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1276,7 +1277,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1293,7 +1294,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1346,7 +1347,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1381,7 +1382,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@@ -1405,7 +1406,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1422,7 +1423,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1457,7 +1458,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1475,7 +1476,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1614,7 +1615,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1631,28 +1632,28 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1678,7 +1679,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1696,7 +1697,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@@ -1752,6 +1753,8 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
if (ab->hw_params.global_reset)
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+ req.nm_modem |= PLATFORM_CAP_PCIE_PME_D3COLD;
+
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n");
ret = qmi_txn_init(&ab->qmi.handle, &txn,
@@ -1961,7 +1964,7 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
continue;
dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].prev_size,
ab->qmi.target_mem[i].vaddr,
ab->qmi.target_mem[i].paddr);
ab->qmi.target_mem[i].vaddr = NULL;
@@ -1982,12 +1985,12 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
* in such case, no need to allocate memory for FW again.
*/
if (chunk->vaddr) {
- if (chunk->prev_type == chunk->type ||
+ if (chunk->prev_type == chunk->type &&
chunk->prev_size == chunk->size)
continue;
/* cannot reuse the existing chunk */
- dma_free_coherent(ab->dev, chunk->size,
+ dma_free_coherent(ab->dev, chunk->prev_size,
chunk->vaddr, chunk->paddr);
chunk->vaddr = NULL;
}
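A note on the hunk above: a previously allocated target memory chunk may be reused only when both its type and its size match the new request (hence the '||' to '&&' change), and a chunk that cannot be reused must be freed with the size it was originally allocated with (prev_size). A rough standalone sketch of that reuse check (illustrative types and values, not driver code):

/* Illustrative sketch only: the reuse condition fixed above. */
#include <stdbool.h>
#include <stdio.h>

struct mem_chunk {
	int prev_type, type;
	unsigned int prev_size, size;
	void *vaddr;
};

static bool chunk_reusable(const struct mem_chunk *c)
{
	return c->vaddr && c->prev_type == c->type && c->prev_size == c->size;
}

int main(void)
{
	struct mem_chunk c = {
		.prev_type = 1, .type = 1,
		.prev_size = 4096, .size = 8192,
		.vaddr = (void *)0x1,	/* pretend it is already allocated */
	};

	/* same type but different size: with '||' this would wrongly be
	 * treated as reusable; with '&&' it is correctly reallocated.
	 */
	printf("reusable: %s\n", chunk_reusable(&c) ? "yes" : "no");
	return 0;
}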
@@ -3087,6 +3090,9 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
.fn = ath11k_qmi_msg_fw_init_done_cb,
},
+
+ /* end of list */
+ {},
};
static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -3158,6 +3164,9 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
case ATH11K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ if (!ab->is_reset)
+ ath11k_core_pre_reconfigure_recovery(ab);
break;
case ATH11K_QMI_EVENT_REQUEST_MEM:
ret = ath11k_qmi_event_mem_request(qmi);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index fad9f8d308a2..2a8a3e3dcff6 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -6829,7 +6829,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
- "peer sta ps chnange ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
+ "peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
ev->peer_macaddr.addr, ev->peer_ps_state,
ev->ps_supported_bitmap, ev->peer_ps_valid,
ev->peer_ps_timestamp);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index ed5d2160a72a..11ed30d6b595 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -781,6 +781,7 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath5k_start,
.stop = ath5k_stop,
.add_interface = ath5k_add_interface,
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 9bdfcee2f448..eff94bcd1f0a 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -45,11 +45,6 @@ ath9k_hw-y:= \
ar9003_eeprom.o \
ar9003_paprd.o
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_mac.o += -Wno-array-bounds
-endif
-
ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 9899661f9a60..8d7efd80d97a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -585,7 +585,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 *p_gpm = NULL, mismatch = 0, more_data;
+ u32 *p_gpm = NULL, more_data;
u32 offset;
u8 recv_type = 0, recv_opcode = 0;
bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
@@ -656,7 +656,6 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
} else {
ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
*(p_gpm + 1));
- mismatch++;
ar9003_mci_process_gpm_extra(ah, recv_type,
recv_opcode, p_gpm);
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3ccf8cfc6b63..2cc23605c9fc 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -20,6 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/kstrtox.h>
#include <linux/leds.h>
#include <linux/completion.h>
#include <linux/time.h>
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 4d9002a9d082..1a2e0c7eeb02 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -708,14 +708,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
- struct sk_buff *nskb;
int ret;
if (!skb)
return;
if (!hif_dev)
- goto free;
+ goto free_skb;
switch (urb->status) {
case 0:
@@ -724,7 +723,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
- goto free;
+ goto free_skb;
default:
skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
@@ -735,25 +734,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
if (likely(urb->actual_length != 0)) {
skb_put(skb, urb->actual_length);
- /* Process the command first */
+ /*
+ * Process the command first.
+ * skb is either freed here or passed on to another
+ * callback function to be managed there.
+ */
ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
skb->len, USB_REG_IN_PIPE);
-
- nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
- if (!nskb) {
+ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+ if (!skb) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: REG_IN memory allocation failure\n");
- urb->context = NULL;
- return;
+ goto free_rx_buf;
}
- rx_buf->skb = nskb;
+ rx_buf->skb = skb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
- nskb->data, MAX_REG_IN_BUF_SIZE,
+ skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
}
@@ -762,12 +763,13 @@ resubmit:
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
usb_unanchor_urb(urb);
- goto free;
+ goto free_skb;
}
return;
-free:
+free_skb:
kfree_skb(skb);
+free_rx_buf:
kfree(rx_buf);
urb->context = NULL;
}
@@ -780,14 +782,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_buf, list) {
- usb_get_urb(tx_buf->urb);
- spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
- usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
- spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
@@ -1329,10 +1327,24 @@ static int send_eject_command(struct usb_interface *interface)
static int ath9k_hif_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out;
struct usb_device *udev = interface_to_usbdev(interface);
+ struct usb_host_interface *alt;
struct hif_device_usb *hif_dev;
int ret = 0;
+ /* Verify the expected endpoints are present */
+ alt = interface->cur_altsetting;
+ if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 ||
+ usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE ||
+ usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE ||
+ usb_endpoint_num(int_in) != USB_REG_IN_PIPE ||
+ usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) {
+ dev_err(&udev->dev,
+ "ath9k_htc: Device endpoint numbers are not the expected ones\n");
+ return -ENODEV;
+ }
+
if (id->driver_info == STORAGE_DEVICE)
return send_eject_command(interface);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 61875c45366b..51766de5ec3b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1870,6 +1870,7 @@ static void ath9k_htc_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_ops ath9k_htc_ops = {
.tx = ath9k_htc_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath9k_htc_start,
.stop = ath9k_htc_stop,
.add_interface = ath9k_htc_add_interface,
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 039bf0c35fbe..3363fc4e8966 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -266,7 +266,9 @@ static void ath_mci_set_concur_txprio(struct ath_softc *sc)
stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
ATH_MCI_INQUIRY_PRIO;
} else {
- u8 prof_prio[] = { 50, 90, 94, 52 };/* RFCOMM, A2DP, HID, PAN */
+ static const u8 prof_prio[] = {
+ 50, 90, 94, 52
+ }; /* RFCOMM, A2DP, HID, PAN */
stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;
@@ -644,7 +646,9 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
struct ath_hw *ah = sc->sc_ah;
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
struct ath9k_channel *chan = ah->curchan;
- u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
+ static const u32 channelmap[] = {
+ 0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff
+ };
int i;
s16 chan_start, chan_end;
u16 wlan_chan;
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 95544ce05acf..8a996ed9a3be 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -189,7 +189,7 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
buf[len] = '\0';
- if (strtobool(buf, &start))
+ if (kstrtobool(buf, &start))
return -EINVAL;
mutex_lock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index ba271a10d4ab..39abb59d8771 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1678,7 +1678,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf = NULL;
- int sent = 0;
int i, ret;
INIT_LIST_HEAD(&bf_q);
@@ -1707,7 +1706,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
bf_tail = bf;
nframes--;
- sent++;
TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
if (an->sta && skb_queue_empty(&tid->retry_q))
diff --git a/drivers/net/wireless/ath/carl9170/Makefile b/drivers/net/wireless/ath/carl9170/Makefile
index 7463baa62fa8..1a81868ce26d 100644
--- a/drivers/net/wireless/ath/carl9170/Makefile
+++ b/drivers/net/wireless/ath/carl9170/Makefile
@@ -3,8 +3,3 @@ carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o
carl9170-$(CONFIG_CARL9170_DEBUGFS) += debug.o
obj-$(CONFIG_CARL9170) += carl9170.o
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_cmd.o += -Wno-array-bounds
-endif
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 4a500095555c..ff4b3b50250c 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -118,10 +118,10 @@ struct carl9170_reg_list {
} __packed;
struct carl9170_write_reg {
- struct {
+ DECLARE_FLEX_ARRAY(struct {
__le32 addr;
__le32 val;
- } regs[0] __packed;
+ } __packed, regs);
} __packed;
struct carl9170_write_reg_byte {
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 1540e9827f48..524327d24964 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1715,6 +1715,7 @@ static const struct ieee80211_ops carl9170_ops = {
.start = carl9170_op_start,
.stop = carl9170_op_stop,
.tx = carl9170_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.flush = carl9170_op_flush,
.add_interface = carl9170_op_add_interface,
.remove_interface = carl9170_op_remove_interface,
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index 0a4e42e806b9..ded2c6d0a759 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -271,7 +271,7 @@ struct ar9170_tx_frame {
union {
struct ieee80211_hdr i3e;
- u8 payload[0];
+ DECLARE_FLEX_ARRAY(u8, payload);
} data;
} __packed;
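Both carl9170 hunks replace deprecated zero-length arrays with DECLARE_FLEX_ARRAY() from linux/stddef.h, which wraps the member in an anonymous union so a flexible array can be the sole member of a struct or union. A rough sketch of the before/after shape, using a hypothetical demo_frame type:

#include <linux/ieee80211.h>
#include <linux/stddef.h>

struct demo_frame {
	__le16 length;
	union {
		struct ieee80211_hdr i3e;
		DECLARE_FLEX_ARRAY(u8, payload);	/* was: u8 payload[0]; */
	} data;
} __packed;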
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 6b8d2889d73f..3b79cc1c7c5b 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1361,6 +1361,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.prepare_multicast = wcn36xx_prepare_multicast,
.configure_filter = wcn36xx_configure_filter,
.tx = wcn36xx_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.set_key = wcn36xx_set_key,
.hw_scan = wcn36xx_hw_scan,
.cancel_hw_scan = wcn36xx_cancel_hw_scan,
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 04d1aa0e2d35..c021ebcddee7 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -2154,7 +2154,7 @@ static const struct file_operations fops_led_blink_time = {
};
/*---------FW capabilities------------*/
-static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
+static int fw_capabilities_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
@@ -2163,22 +2163,10 @@ static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_fw_capabilities_debugfs_show,
- inode->i_private);
-}
-
-static const struct file_operations fops_fw_capabilities = {
- .open = wil_fw_capabilities_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(fw_capabilities);
/*---------FW version------------*/
-static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
+static int fw_version_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
@@ -2189,19 +2177,7 @@ static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_fw_version_debugfs_show,
- inode->i_private);
-}
-
-static const struct file_operations fops_fw_version = {
- .open = wil_fw_version_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(fw_version);
/*---------suspend_stats---------*/
static ssize_t wil_write_suspend_stats(struct file *file,
@@ -2366,8 +2342,8 @@ static const struct {
{"recovery", 0644, &fops_recovery},
{"led_cfg", 0644, &fops_led_cfg},
{"led_blink_time", 0644, &fops_led_blink_time},
- {"fw_capabilities", 0444, &fops_fw_capabilities},
- {"fw_version", 0444, &fops_fw_version},
+ {"fw_capabilities", 0444, &fw_capabilities_fops},
+ {"fw_version", 0444, &fw_version_fops},
{"suspend_stats", 0644, &fops_suspend_stats},
{"compressed_rx_status", 0644, &fops_compressed_rx_status},
{"srings", 0444, &srings_fops},
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 24e609c1f523..009bca34ece3 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2179,6 +2179,7 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static const struct ieee80211_ops at76_ops = {
.tx = at76_mac80211_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = at76_add_interface,
.remove_interface = at76_remove_interface,
.config = at76_config,
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 45d079b93384..7c2d1c588156 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1643,9 +1643,10 @@ EXPORT_SYMBOL(stop_atmel_card);
static int atmel_set_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->essid;
struct atmel_private *priv = netdev_priv(dev);
/* Check if we asked for `any' */
@@ -1671,9 +1672,10 @@ static int atmel_set_essid(struct net_device *dev,
static int atmel_get_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->essid;
struct atmel_private *priv = netdev_priv(dev);
/* Get the current SSID */
@@ -1692,9 +1694,10 @@ static int atmel_get_essid(struct net_device *dev,
static int atmel_get_wap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *awrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct sockaddr *awrq = &wrqu->ap_addr;
struct atmel_private *priv = netdev_priv(dev);
memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
@@ -1704,9 +1707,10 @@ static int atmel_get_wap(struct net_device *dev,
static int atmel_set_encode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->encoding;
struct atmel_private *priv = netdev_priv(dev);
/* Basic checking: do we have a key to set ?
@@ -1793,9 +1797,10 @@ static int atmel_set_encode(struct net_device *dev,
static int atmel_get_encode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->encoding;
struct atmel_private *priv = netdev_priv(dev);
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
@@ -2003,18 +2008,19 @@ static int atmel_get_auth(struct net_device *dev,
static int atmel_get_name(struct net_device *dev,
struct iw_request_info *info,
- char *cwrq,
+ union iwreq_data *wrqu,
char *extra)
{
- strcpy(cwrq, "IEEE 802.11-DS");
+ strcpy(wrqu->name, "IEEE 802.11-DS");
return 0;
}
static int atmel_set_rate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->bitrate;
struct atmel_private *priv = netdev_priv(dev);
if (vwrq->fixed == 0) {
@@ -2053,9 +2059,10 @@ static int atmel_set_rate(struct net_device *dev,
static int atmel_set_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 *uwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ __u32 *uwrq = &wrqu->mode;
struct atmel_private *priv = netdev_priv(dev);
if (*uwrq != IW_MODE_ADHOC && *uwrq != IW_MODE_INFRA)
@@ -2067,9 +2074,10 @@ static int atmel_set_mode(struct net_device *dev,
static int atmel_get_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 *uwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ __u32 *uwrq = &wrqu->mode;
struct atmel_private *priv = netdev_priv(dev);
*uwrq = priv->operating_mode;
@@ -2078,9 +2086,10 @@ static int atmel_get_mode(struct net_device *dev,
static int atmel_get_rate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->bitrate;
struct atmel_private *priv = netdev_priv(dev);
if (priv->auto_tx_rate) {
@@ -2108,9 +2117,10 @@ static int atmel_get_rate(struct net_device *dev,
static int atmel_set_power(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->power;
struct atmel_private *priv = netdev_priv(dev);
priv->power_mode = vwrq->disabled ? 0 : 1;
return -EINPROGRESS;
@@ -2118,9 +2128,10 @@ static int atmel_set_power(struct net_device *dev,
static int atmel_get_power(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->power;
struct atmel_private *priv = netdev_priv(dev);
vwrq->disabled = priv->power_mode ? 0 : 1;
vwrq->flags = IW_POWER_ON;
@@ -2129,9 +2140,10 @@ static int atmel_get_power(struct net_device *dev,
static int atmel_set_retry(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->retry;
struct atmel_private *priv = netdev_priv(dev);
if (!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) {
@@ -2152,9 +2164,10 @@ static int atmel_set_retry(struct net_device *dev,
static int atmel_get_retry(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->retry;
struct atmel_private *priv = netdev_priv(dev);
vwrq->disabled = 0; /* Can't be disabled */
@@ -2175,9 +2188,10 @@ static int atmel_get_retry(struct net_device *dev,
static int atmel_set_rts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->rts;
struct atmel_private *priv = netdev_priv(dev);
int rthr = vwrq->value;
@@ -2193,9 +2207,10 @@ static int atmel_set_rts(struct net_device *dev,
static int atmel_get_rts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->rts;
struct atmel_private *priv = netdev_priv(dev);
vwrq->value = priv->rts_threshold;
@@ -2207,9 +2222,10 @@ static int atmel_get_rts(struct net_device *dev,
static int atmel_set_frag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->frag;
struct atmel_private *priv = netdev_priv(dev);
int fthr = vwrq->value;
@@ -2226,9 +2242,10 @@ static int atmel_set_frag(struct net_device *dev,
static int atmel_get_frag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->frag;
struct atmel_private *priv = netdev_priv(dev);
vwrq->value = priv->frag_threshold;
@@ -2240,9 +2257,10 @@ static int atmel_get_frag(struct net_device *dev,
static int atmel_set_freq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *fwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_freq *fwrq = &wrqu->freq;
struct atmel_private *priv = netdev_priv(dev);
int rc = -EINPROGRESS; /* Call commit handler */
@@ -2270,9 +2288,10 @@ static int atmel_set_freq(struct net_device *dev,
static int atmel_get_freq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *fwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_freq *fwrq = &wrqu->freq;
struct atmel_private *priv = netdev_priv(dev);
fwrq->m = priv->channel;
@@ -2282,7 +2301,7 @@ static int atmel_get_freq(struct net_device *dev,
static int atmel_set_scan(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *dwrq,
char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
@@ -2320,9 +2339,10 @@ static int atmel_set_scan(struct net_device *dev,
static int atmel_get_scan(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->data;
struct atmel_private *priv = netdev_priv(dev);
int i;
char *current_ev = extra;
@@ -2391,9 +2411,10 @@ static int atmel_get_scan(struct net_device *dev,
static int atmel_get_range(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->data;
struct atmel_private *priv = netdev_priv(dev);
struct iw_range *range = (struct iw_range *) extra;
int k, i, j;
@@ -2465,9 +2486,10 @@ static int atmel_get_range(struct net_device *dev,
static int atmel_set_wap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *awrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct sockaddr *awrq = &wrqu->ap_addr;
struct atmel_private *priv = netdev_priv(dev);
int i;
static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -2507,7 +2529,7 @@ static int atmel_set_wap(struct net_device *dev,
static int atmel_config_commit(struct net_device *dev,
struct iw_request_info *info, /* NULL */
- void *zwrq, /* NULL */
+ union iwreq_data *zwrq, /* NULL */
char *extra) /* NULL */
{
return atmel_open(dev);
@@ -2515,61 +2537,35 @@ static int atmel_config_commit(struct net_device *dev,
static const iw_handler atmel_handler[] =
{
- (iw_handler) atmel_config_commit, /* SIOCSIWCOMMIT */
- (iw_handler) atmel_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) atmel_set_freq, /* SIOCSIWFREQ */
- (iw_handler) atmel_get_freq, /* SIOCGIWFREQ */
- (iw_handler) atmel_set_mode, /* SIOCSIWMODE */
- (iw_handler) atmel_get_mode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) atmel_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- (iw_handler) NULL, /* SIOCSIWSPY */
- (iw_handler) NULL, /* SIOCGIWSPY */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) atmel_set_wap, /* SIOCSIWAP */
- (iw_handler) atmel_get_wap, /* SIOCGIWAP */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCGIWAPLIST */
- (iw_handler) atmel_set_scan, /* SIOCSIWSCAN */
- (iw_handler) atmel_get_scan, /* SIOCGIWSCAN */
- (iw_handler) atmel_set_essid, /* SIOCSIWESSID */
- (iw_handler) atmel_get_essid, /* SIOCGIWESSID */
- (iw_handler) NULL, /* SIOCSIWNICKN */
- (iw_handler) NULL, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) atmel_set_rate, /* SIOCSIWRATE */
- (iw_handler) atmel_get_rate, /* SIOCGIWRATE */
- (iw_handler) atmel_set_rts, /* SIOCSIWRTS */
- (iw_handler) atmel_get_rts, /* SIOCGIWRTS */
- (iw_handler) atmel_set_frag, /* SIOCSIWFRAG */
- (iw_handler) atmel_get_frag, /* SIOCGIWFRAG */
- (iw_handler) NULL, /* SIOCSIWTXPOW */
- (iw_handler) NULL, /* SIOCGIWTXPOW */
- (iw_handler) atmel_set_retry, /* SIOCSIWRETRY */
- (iw_handler) atmel_get_retry, /* SIOCGIWRETRY */
- (iw_handler) atmel_set_encode, /* SIOCSIWENCODE */
- (iw_handler) atmel_get_encode, /* SIOCGIWENCODE */
- (iw_handler) atmel_set_power, /* SIOCSIWPOWER */
- (iw_handler) atmel_get_power, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCSIWGENIE */
- (iw_handler) NULL, /* SIOCGIWGENIE */
- (iw_handler) atmel_set_auth, /* SIOCSIWAUTH */
- (iw_handler) atmel_get_auth, /* SIOCGIWAUTH */
- (iw_handler) atmel_set_encodeext, /* SIOCSIWENCODEEXT */
- (iw_handler) atmel_get_encodeext, /* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
+ IW_HANDLER(SIOCSIWCOMMIT, atmel_config_commit),
+ IW_HANDLER(SIOCGIWNAME, atmel_get_name),
+ IW_HANDLER(SIOCSIWFREQ, atmel_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, atmel_get_freq),
+ IW_HANDLER(SIOCSIWMODE, atmel_set_mode),
+ IW_HANDLER(SIOCGIWMODE, atmel_get_mode),
+ IW_HANDLER(SIOCGIWRANGE, atmel_get_range),
+ IW_HANDLER(SIOCSIWAP, atmel_set_wap),
+ IW_HANDLER(SIOCGIWAP, atmel_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, atmel_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, atmel_get_scan),
+ IW_HANDLER(SIOCSIWESSID, atmel_set_essid),
+ IW_HANDLER(SIOCGIWESSID, atmel_get_essid),
+ IW_HANDLER(SIOCSIWRATE, atmel_set_rate),
+ IW_HANDLER(SIOCGIWRATE, atmel_get_rate),
+ IW_HANDLER(SIOCSIWRTS, atmel_set_rts),
+ IW_HANDLER(SIOCGIWRTS, atmel_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, atmel_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, atmel_get_frag),
+ IW_HANDLER(SIOCSIWRETRY, atmel_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, atmel_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, atmel_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, atmel_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, atmel_set_power),
+ IW_HANDLER(SIOCGIWPOWER, atmel_get_power),
+ IW_HANDLER(SIOCSIWAUTH, atmel_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, atmel_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, atmel_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, atmel_get_encodeext),
};
static const iw_handler atmel_private_handler[] =
@@ -2614,8 +2610,8 @@ static const struct iw_handler_def atmel_handler_def = {
.num_standard = ARRAY_SIZE(atmel_handler),
.num_private = ARRAY_SIZE(atmel_private_handler),
.num_private_args = ARRAY_SIZE(atmel_private_args),
- .standard = (iw_handler *) atmel_handler,
- .private = (iw_handler *) atmel_private_handler,
+ .standard = atmel_handler,
+ .private = atmel_private_handler,
.private_args = (struct iw_priv_args *) atmel_private_args,
.get_wireless_stats = atmel_get_wireless_stats
};
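The atmel handler-table rewrite leans on two Wireless Extensions conventions: every standard handler now uses the canonical iw_handler prototype taking union iwreq_data *, and IW_HANDLER() from net/iw_handler.h expands to a designated initializer, [IW_IOCTL_IDX(id)] = fn, so the NULL and "hole" entries can simply be dropped. A minimal sketch with a hypothetical handler (demo_* names are illustrative only):

static int demo_get_name(struct net_device *dev, struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	strscpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
	return 0;
}

static const iw_handler demo_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, demo_get_name),
};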
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index b2539a916fd0..92ca0b2ca286 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5171,6 +5171,7 @@ static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
static const struct ieee80211_ops b43_hw_ops = {
.tx = b43_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43_op_conf_tx,
.add_interface = b43_op_add_interface,
.remove_interface = b43_op_remove_interface,
@@ -5783,15 +5784,12 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason)
static void b43_print_driverinfo(void)
{
- const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
+ const char *feat_pci = "", *feat_nphy = "",
*feat_leds = "", *feat_sdio = "";
#ifdef CONFIG_B43_PCI_AUTOSELECT
feat_pci = "P";
#endif
-#ifdef CONFIG_B43_PCMCIA
- feat_pcmcia = "M";
-#endif
#ifdef CONFIG_B43_PHY_N
feat_nphy = "N";
#endif
@@ -5802,9 +5800,8 @@ static void b43_print_driverinfo(void)
feat_sdio = "S";
#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
- "[ Features: %s%s%s%s%s ]\n",
- feat_pci, feat_pcmcia, feat_nphy,
- feat_leds, feat_sdio);
+ "[ Features: %s%s%s%s ]\n",
+ feat_pci, feat_nphy, feat_leds, feat_sdio);
}
static int __init b43_init(void)
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 4022c544aefe..760136638a95 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3532,6 +3532,7 @@ static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
static const struct ieee80211_ops b43legacy_hw_ops = {
.tx = b43legacy_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43legacy_op_conf_tx,
.add_interface = b43legacy_op_add_interface,
.remove_interface = b43legacy_op_remove_interface,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 2208ab3aa795..60f5645aead3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -24,6 +24,12 @@
#define BRCMF_NROF_COMMON_MSGRINGS (BRCMF_NROF_H2D_COMMON_MSGRINGS + \
BRCMF_NROF_D2H_COMMON_MSGRINGS)
+/* The interval to poll console */
+#define BRCMF_CONSOLE 10
+
+/* The maximum console interval value (5 mins) */
+#define MAX_CONSOLE_INTERVAL (5 * 60)
+
/* The level of bus communication with the dongle */
enum brcmf_bus_state {
BRCMF_BUS_DOWN, /* Not ready for frame transfers */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index dfcfb3333369..bff3128c2f26 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -88,9 +88,42 @@
#define BRCMF_PS_MAX_TIMEOUT_MS 2000
+/* Dump obss definitions */
+#define ACS_MSRMNT_DELAY 80
+#define CHAN_NOISE_DUMMY (-80)
+#define OBSS_TOKEN_IDX 15
+#define IBSS_TOKEN_IDX 15
+#define TX_TOKEN_IDX 14
+#define CTG_TOKEN_IDX 13
+#define PKT_TOKEN_IDX 15
+#define IDLE_TOKEN_IDX 12
+
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
+#define BRCMF_MAX_CHANSPEC_LIST \
+ (BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1)
+
+struct brcmf_dump_survey {
+ u32 obss;
+ u32 ibss;
+ u32 no_ctg;
+ u32 no_pckt;
+ u32 tx;
+ u32 idle;
+};
+
+struct cca_stats_n_flags {
+ u32 msrmnt_time; /* Time for Measurement (msec) */
+ u32 msrmnt_done; /* flag set when measurement complete */
+ char buf[1];
+};
+
+struct cca_msrmnt_query {
+ u32 msrmnt_query;
+ u32 time_req;
+};
+
static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
{
if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
@@ -234,6 +267,48 @@ struct parsed_vndr_ies {
struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
+#define WL_INTERFACE_CREATE_VER_1 1
+#define WL_INTERFACE_CREATE_VER_2 2
+#define WL_INTERFACE_CREATE_VER_3 3
+#define WL_INTERFACE_CREATE_VER_MAX WL_INTERFACE_CREATE_VER_3
+
+#define WL_INTERFACE_MAC_DONT_USE 0x0
+#define WL_INTERFACE_MAC_USE 0x2
+
+#define WL_INTERFACE_CREATE_STA 0x0
+#define WL_INTERFACE_CREATE_AP 0x1
+
+struct wl_interface_create_v1 {
+ u16 ver; /* structure version */
+ u32 flags; /* flags for operation */
+ u8 mac_addr[ETH_ALEN]; /* MAC address */
+ u32 wlc_index; /* optional for wlc index */
+};
+
+struct wl_interface_create_v2 {
+ u16 ver; /* structure version */
+ u8 pad1[2];
+ u32 flags; /* flags for operation */
+ u8 mac_addr[ETH_ALEN]; /* MAC address */
+ u8 iftype; /* type of interface created */
+ u8 pad2;
+ u32 wlc_index; /* optional for wlc index */
+};
+
+struct wl_interface_create_v3 {
+ u16 ver; /* structure version */
+ u16 len; /* length of structure + data */
+ u16 fixed_len; /* length of structure */
+ u8 iftype; /* type of interface created */
+ u8 wlc_index; /* optional for wlc index */
+ u32 flags; /* flags for operation */
+ u8 mac_addr[ETH_ALEN]; /* MAC address */
+ u8 bssid[ETH_ALEN]; /* optional for BSSID */
+ u8 if_index; /* interface index request */
+ u8 pad[3];
+ u8 data[]; /* Optional for specific data */
+};
+
static u8 nl80211_band_to_fwil(enum nl80211_band band)
{
switch (band) {
@@ -521,40 +596,228 @@ static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr)
return -ENOMEM;
}
+static void brcmf_set_vif_sta_macaddr(struct brcmf_if *ifp, u8 *mac_addr)
+{
+ u8 mac_idx = ifp->drvr->sta_mac_idx;
+
+ /* set difference MAC address with locally administered bit */
+ memcpy(mac_addr, ifp->mac_addr, ETH_ALEN);
+ mac_addr[0] |= 0x02;
+ mac_addr[3] ^= mac_idx ? 0xC0 : 0xA0;
+ mac_idx++;
+ mac_idx = mac_idx % 2;
+ ifp->drvr->sta_mac_idx = mac_idx;
+}
+
+static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr)
+{
+ struct wl_interface_create_v1 iface_v1;
+ struct wl_interface_create_v2 iface_v2;
+ struct wl_interface_create_v3 iface_v3;
+ u32 iface_create_ver;
+ int err;
+
+ /* interface_create version 1 */
+ memset(&iface_v1, 0, sizeof(iface_v1));
+ iface_v1.ver = WL_INTERFACE_CREATE_VER_1;
+ iface_v1.flags = WL_INTERFACE_CREATE_STA |
+ WL_INTERFACE_MAC_USE;
+ if (!is_zero_ether_addr(macaddr))
+ memcpy(iface_v1.mac_addr, macaddr, ETH_ALEN);
+ else
+ brcmf_set_vif_sta_macaddr(ifp, iface_v1.mac_addr);
+
+ err = brcmf_fil_iovar_data_get(ifp, "interface_create",
+ &iface_v1,
+ sizeof(iface_v1));
+ if (err) {
+ brcmf_info("failed to create interface(v1), err=%d\n",
+ err);
+ } else {
+ brcmf_dbg(INFO, "interface created(v1)\n");
+ return 0;
+ }
+
+ /* interface_create version 2 */
+ memset(&iface_v2, 0, sizeof(iface_v2));
+ iface_v2.ver = WL_INTERFACE_CREATE_VER_2;
+ iface_v2.flags = WL_INTERFACE_MAC_USE;
+ iface_v2.iftype = WL_INTERFACE_CREATE_STA;
+ if (!is_zero_ether_addr(macaddr))
+ memcpy(iface_v2.mac_addr, macaddr, ETH_ALEN);
+ else
+ brcmf_set_vif_sta_macaddr(ifp, iface_v2.mac_addr);
+
+ err = brcmf_fil_iovar_data_get(ifp, "interface_create",
+ &iface_v2,
+ sizeof(iface_v2));
+ if (err) {
+ brcmf_info("failed to create interface(v2), err=%d\n",
+ err);
+ } else {
+ brcmf_dbg(INFO, "interface created(v2)\n");
+ return 0;
+ }
+
+ /* interface_create version 3+ */
+ /* get supported version from firmware side */
+ iface_create_ver = 0;
+ err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
+ &iface_create_ver);
+ if (err) {
+ brcmf_err("fail to get supported version, err=%d\n", err);
+ return -EOPNOTSUPP;
+ }
+
+ switch (iface_create_ver) {
+ case WL_INTERFACE_CREATE_VER_3:
+ memset(&iface_v3, 0, sizeof(iface_v3));
+ iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
+ iface_v3.flags = WL_INTERFACE_MAC_USE;
+ iface_v3.iftype = WL_INTERFACE_CREATE_STA;
+ if (!is_zero_ether_addr(macaddr))
+ memcpy(iface_v3.mac_addr, macaddr, ETH_ALEN);
+ else
+ brcmf_set_vif_sta_macaddr(ifp, iface_v3.mac_addr);
+
+ err = brcmf_fil_iovar_data_get(ifp, "interface_create",
+ &iface_v3,
+ sizeof(iface_v3));
+
+ if (!err)
+ brcmf_dbg(INFO, "interface created(v3)\n");
+ break;
+ default:
+ brcmf_err("not support interface create(v%d)\n",
+ iface_create_ver);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (err) {
+ brcmf_info("station interface creation failed (%d)\n",
+ err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
{
+ struct wl_interface_create_v1 iface_v1;
+ struct wl_interface_create_v2 iface_v2;
+ struct wl_interface_create_v3 iface_v3;
+ u32 iface_create_ver;
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_mbss_ssid_le mbss_ssid_le;
int bsscfgidx;
int err;
- memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
- bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr);
- if (bsscfgidx < 0)
- return bsscfgidx;
+ /* interface_create version 1 */
+ memset(&iface_v1, 0, sizeof(iface_v1));
+ iface_v1.ver = WL_INTERFACE_CREATE_VER_1;
+ iface_v1.flags = WL_INTERFACE_CREATE_AP |
+ WL_INTERFACE_MAC_USE;
- mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx);
- mbss_ssid_le.SSID_len = cpu_to_le32(5);
- sprintf(mbss_ssid_le.SSID, "ssid%d" , bsscfgidx);
+ brcmf_set_vif_sta_macaddr(ifp, iface_v1.mac_addr);
- err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le,
- sizeof(mbss_ssid_le));
- if (err < 0)
- bphy_err(drvr, "setting ssid failed %d\n", err);
+ err = brcmf_fil_iovar_data_get(ifp, "interface_create",
+ &iface_v1,
+ sizeof(iface_v1));
+ if (err) {
+ brcmf_info("failed to create interface(v1), err=%d\n",
+ err);
+ } else {
+ brcmf_dbg(INFO, "interface created(v1)\n");
+ return 0;
+ }
+
+ /* interface_create version 2 */
+ memset(&iface_v2, 0, sizeof(iface_v2));
+ iface_v2.ver = WL_INTERFACE_CREATE_VER_2;
+ iface_v2.flags = WL_INTERFACE_MAC_USE;
+ iface_v2.iftype = WL_INTERFACE_CREATE_AP;
+
+ brcmf_set_vif_sta_macaddr(ifp, iface_v2.mac_addr);
+
+ err = brcmf_fil_iovar_data_get(ifp, "interface_create",
+ &iface_v2,
+ sizeof(iface_v2));
+ if (err) {
+ brcmf_info("failed to create interface(v2), err=%d\n",
+ err);
+ } else {
+ brcmf_dbg(INFO, "interface created(v2)\n");
+ return 0;
+ }
+
+ /* interface_create version 3+ */
+ /* get supported version from firmware side */
+ iface_create_ver = 0;
+ err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
+ &iface_create_ver);
+ if (err) {
+ brcmf_err("fail to get supported version, err=%d\n", err);
+ return -EOPNOTSUPP;
+ }
+
+ switch (iface_create_ver) {
+ case WL_INTERFACE_CREATE_VER_3:
+ memset(&iface_v3, 0, sizeof(iface_v3));
+ iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
+ iface_v3.flags = WL_INTERFACE_MAC_USE;
+ iface_v3.iftype = WL_INTERFACE_CREATE_AP;
+ brcmf_set_vif_sta_macaddr(ifp, iface_v3.mac_addr);
+
+ err = brcmf_fil_iovar_data_get(ifp, "interface_create",
+ &iface_v3,
+ sizeof(iface_v3));
+
+ if (!err)
+ brcmf_dbg(INFO, "interface created(v3)\n");
+ break;
+ default:
+ brcmf_err("not support interface create(v%d)\n",
+ iface_create_ver);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (err) {
+ brcmf_info("Does not support interface_create (%d)\n",
+ err);
+ memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
+ bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr);
+ if (bsscfgidx < 0)
+ return bsscfgidx;
+
+ mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx);
+ mbss_ssid_le.SSID_len = cpu_to_le32(5);
+ sprintf(mbss_ssid_le.SSID, "ssid%d", bsscfgidx);
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le,
+ sizeof(mbss_ssid_le));
+
+ if (err < 0)
+ bphy_err(drvr, "setting ssid failed %d\n", err);
+ }
return err;
}
/**
- * brcmf_ap_add_vif() - create a new AP virtual interface for multiple BSS
+ * brcmf_apsta_add_vif() - create a new AP or STA virtual interface
*
* @wiphy: wiphy device of new interface.
* @name: name of the new interface.
- * @params: contains mac address for AP device.
+ * @params: contains mac address for AP or STA device.
+ * @type: interface type.
*/
static
-struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
- struct vif_params *params)
+struct wireless_dev *brcmf_apsta_add_vif(struct wiphy *wiphy, const char *name,
+ struct vif_params *params,
+ enum nl80211_iftype type)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
@@ -562,18 +825,24 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
struct brcmf_cfg80211_vif *vif;
int err;
+ if (type != NL80211_IFTYPE_STATION && type != NL80211_IFTYPE_AP)
+ return ERR_PTR(-EINVAL);
+
if (brcmf_cfg80211_vif_event_armed(cfg))
return ERR_PTR(-EBUSY);
brcmf_dbg(INFO, "Adding vif \"%s\"\n", name);
- vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP);
+ vif = brcmf_alloc_vif(cfg, type);
if (IS_ERR(vif))
return (struct wireless_dev *)vif;
brcmf_cfg80211_arm_vif_event(cfg, vif);
- err = brcmf_cfg80211_request_ap_if(ifp);
+ if (type == NL80211_IFTYPE_STATION)
+ err = brcmf_cfg80211_request_sta_if(ifp, params->macaddr);
+ else
+ err = brcmf_cfg80211_request_ap_if(ifp);
if (err) {
brcmf_cfg80211_arm_vif_event(cfg, NULL);
goto fail;
@@ -720,15 +989,15 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
}
switch (type) {
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
case NL80211_IFTYPE_MONITOR:
return brcmf_mon_add_vif(wiphy, name);
+ case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
- wdev = brcmf_ap_add_vif(wiphy, name, params);
+ wdev = brcmf_apsta_add_vif(wiphy, name, params, type);
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
@@ -848,8 +1117,8 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
return err;
}
-static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy,
- struct wireless_dev *wdev)
+static int brcmf_cfg80211_del_apsta_iface(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = wdev->netdev;
@@ -906,15 +1175,15 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
return -EOPNOTSUPP;
case NL80211_IFTYPE_MONITOR:
return brcmf_mon_del_vif(wiphy, wdev);
+ case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
- return brcmf_cfg80211_del_ap_iface(wiphy, wdev);
+ return brcmf_cfg80211_del_apsta_iface(wiphy, wdev);
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
@@ -1417,6 +1686,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
locally_generated, GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);
if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_NONE) {
@@ -2047,6 +2318,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
+ if (sme->channel_hint)
+ chan = sme->channel_hint;
+
+ if (sme->bssid_hint)
+ sme->bssid = sme->bssid_hint;
+
if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) {
/* A normal (non P2P) connection request setup. */
ie = NULL;
@@ -2269,6 +2546,8 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &ifp->vif->sme_state);
cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
@@ -6002,7 +6281,7 @@ done:
brcmf_dbg(CONN, "Report roaming result\n");
if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X && profile->is_ft) {
- cfg80211_port_authorized(ndev, profile->bssid, GFP_KERNEL);
+ cfg80211_port_authorized(ndev, profile->bssid, NULL, 0, GFP_KERNEL);
brcmf_dbg(CONN, "Report port authorized\n");
}
@@ -6033,6 +6312,10 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
&ifp->vif->sme_state);
conn_params.status = WLAN_STATUS_SUCCESS;
} else {
+ clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS,
+ &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS,
+ &ifp->vif->sme_state);
conn_params.status = WLAN_STATUS_AUTH_TIMEOUT;
}
conn_params.links[0].bssid = profile->bssid;
@@ -6130,9 +6413,13 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
} else if (brcmf_is_linkdown(ifp->vif, e)) {
brcmf_dbg(CONN, "Linkdown\n");
if (!brcmf_is_ibssmode(ifp->vif) &&
- test_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state)) {
- if (memcmp(profile->bssid, e->addr, ETH_ALEN))
+ (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state) ||
+ test_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state))) {
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state) &&
+ memcmp(profile->bssid, e->addr, ETH_ALEN))
return err;
brcmf_bss_connect_done(cfg, ndev, e, false);
@@ -6556,6 +6843,13 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
total = le32_to_cpu(list->count);
+ if (total > BRCMF_MAX_CHANSPEC_LIST) {
+ bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
+ total);
+ err = -EINVAL;
+ goto fail_pbuf;
+ }
+
for (i = 0; i < total; i++) {
ch.chspec = (u16)le32_to_cpu(list->element[i]);
cfg->d11inf.decchspec(&ch);
@@ -6701,6 +6995,13 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
list = (struct brcmf_chanspec_list *)pbuf;
num_chan = le32_to_cpu(list->count);
+ if (num_chan > BRCMF_MAX_CHANSPEC_LIST) {
+ bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
+ num_chan);
+ kfree(pbuf);
+ return -EINVAL;
+ }
+
for (i = 0; i < num_chan; i++) {
ch.chspec = (u16)le32_to_cpu(list->element[i]);
cfg->d11inf.decchspec(&ch);
@@ -6973,7 +7274,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
*
* p2p, mchan, and mbss:
*
- * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
+ * #STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
* #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
* #AP <= 4, matching BI, channels = 1, 4 total
*
@@ -7019,7 +7320,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
goto err;
combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan));
- c0_limits[i].max = 1;
+ c0_limits[i].max = 1 + (p2p && mchan);
c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
if (mon_flag) {
c0_limits[i].max = 1;
@@ -7525,6 +7826,231 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
return 0;
}
+static int
+brcmf_parse_dump_obss(char *buf, struct brcmf_dump_survey *survey)
+{
+ int i;
+ char *token;
+ char delim[] = "\n ";
+ unsigned long val;
+ int err = 0;
+
+ token = strsep(&buf, delim);
+ while (token) {
+ if (!strcmp(token, "OBSS")) {
+ for (i = 0; i < OBSS_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ err = kstrtoul(token, 10, &val);
+ if (err)
+ break;
+ survey->obss = val;
+ }
+
+ if (!strcmp(token, "IBSS")) {
+ for (i = 0; i < IBSS_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ err = kstrtoul(token, 10, &val);
+ if (err)
+ break;
+ survey->ibss = val;
+ }
+
+ if (!strcmp(token, "TXDur")) {
+ for (i = 0; i < TX_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ err = kstrtoul(token, 10, &val);
+ if (err)
+ break;
+ survey->tx = val;
+ }
+
+ if (!strcmp(token, "Category")) {
+ for (i = 0; i < CTG_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ err = kstrtoul(token, 10, &val);
+ if (err)
+ break;
+ survey->no_ctg = val;
+ }
+
+ if (!strcmp(token, "Packet")) {
+ for (i = 0; i < PKT_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ err = kstrtoul(token, 10, &val);
+ if (err)
+ break;
+ survey->no_pckt = val;
+ }
+
+ if (!strcmp(token, "Opp(time):")) {
+ for (i = 0; i < IDLE_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ err = kstrtoul(token, 10, &val);
+ if (err)
+ break;
+ survey->idle = val;
+ }
+
+ token = strsep(&buf, delim);
+ }
+
+ return err;
+}
+
+static int
+brcmf_dump_obss(struct brcmf_if *ifp, struct cca_msrmnt_query req,
+ struct brcmf_dump_survey *survey)
+{
+ struct cca_stats_n_flags *results;
+ char *buf;
+ int err;
+
+ buf = kzalloc(sizeof(char) * BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, &req, sizeof(struct cca_msrmnt_query));
+ err = brcmf_fil_iovar_data_get(ifp, "dump_obss",
+ buf, BRCMF_DCMD_MEDLEN);
+ if (err) {
+ brcmf_err("dump_obss error (%d)\n", err);
+ err = -EINVAL;
+ goto exit;
+ }
+ results = (struct cca_stats_n_flags *)(buf);
+
+ if (req.msrmnt_query)
+ brcmf_parse_dump_obss(results->buf, survey);
+
+exit:
+ kfree(buf);
+ return err;
+}
+
+static s32
+cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ u16 chspec = 0;
+ int err = 0;
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+
+ /* set_channel */
+ chspec = channel_to_chanspec(&cfg->d11inf, chan);
+ if (chspec != INVCHANSPEC) {
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chspec);
+ if (err) {
+ brcmf_err("set chanspec 0x%04x fail, reason %d\n", chspec, err);
+ err = -EINVAL;
+ }
+ } else {
+ brcmf_err("failed to convert host chanspec to fw chanspec\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int
+brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
+ int idx, struct survey_info *info)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_dump_survey survey = {};
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *chan;
+ struct cca_msrmnt_query req;
+ u32 noise;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter: channel idx=%d\n", idx);
+
+ /* Do not run survey when VIF in CONNECTING / CONNECTED states */
+ if ((test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) ||
+ (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))) {
+ return -EBUSY;
+ }
+
+ band = wiphy->bands[NL80211_BAND_2GHZ];
+ if (band && idx >= band->n_channels) {
+ idx -= band->n_channels;
+ band = NULL;
+ }
+
+ if (!band || idx >= band->n_channels) {
+ band = wiphy->bands[NL80211_BAND_5GHZ];
+ if (idx >= band->n_channels)
+ return -ENOENT;
+ }
+
+ /* Setting current channel to the requested channel */
+ chan = &band->channels[idx];
+ err = cfg80211_set_channel(wiphy, ndev, chan, NL80211_CHAN_HT20);
+ if (err) {
+ info->channel = chan;
+ info->filled = 0;
+ return 0;
+ }
+
+ /* Disable mpc */
+ brcmf_set_mpc(ifp, 0);
+
+ /* Set interface up, explicitly. */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+ if (err) {
+ brcmf_err("set interface up failed, err = %d\n", err);
+ goto exit;
+ }
+
+ /* Get noise value */
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PHY_NOISE, &noise);
+ if (err) {
+ brcmf_err("Get Phy Noise failed, use dummy value\n");
+ noise = CHAN_NOISE_DUMMY;
+ }
+
+ /* Start Measurement for obss stats on current channel */
+ req.msrmnt_query = 0;
+ req.time_req = ACS_MSRMNT_DELAY;
+ err = brcmf_dump_obss(ifp, req, &survey);
+ if (err)
+ goto exit;
+
+ /* Add 10 ms for IOVAR completion */
+ msleep(ACS_MSRMNT_DELAY + 10);
+
+ /* Issue IOVAR to collect measurement results */
+ req.msrmnt_query = 1;
+ err = brcmf_dump_obss(ifp, req, &survey);
+ if (err)
+ goto exit;
+
+ info->channel = chan;
+ info->noise = noise;
+ info->time = ACS_MSRMNT_DELAY;
+ info->time_busy = ACS_MSRMNT_DELAY - survey.idle;
+ info->time_rx = survey.obss + survey.ibss + survey.no_ctg +
+ survey.no_pckt;
+ info->time_tx = survey.tx;
+ info->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY | SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+
+ brcmf_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n",
+ ieee80211_frequency_to_channel(chan->center_freq),
+ ACS_MSRMNT_DELAY);
+ brcmf_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n",
+ info->noise, info->time_busy, info->time_rx, info->time_tx);
+
+exit:
+ if (!brcmf_is_apmode(ifp->vif))
+ brcmf_set_mpc(ifp, 1);
+ return err;
+}
+
static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
{
@@ -7676,6 +8202,9 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
ops->set_rekey_data = brcmf_cfg80211_set_rekey_data;
#endif
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_DUMP_OBSS))
+ ops->dump_survey = brcmf_cfg80211_dump_survey;
+
err = wiphy_register(wiphy);
if (err < 0) {
bphy_err(drvr, "Could not register wiphy device (%d)\n", err);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 74020fa10065..4a309e5a5707 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -110,9 +110,9 @@ static int brcmf_c_download(struct brcmf_if *ifp, u16 flag,
dload_buf->dload_type = cpu_to_le16(DL_TYPE_CLM);
dload_buf->len = cpu_to_le32(len);
dload_buf->crc = cpu_to_le32(0);
- len = sizeof(*dload_buf) + len - 1;
- err = brcmf_fil_iovar_data_set(ifp, "clmload", dload_buf, len);
+ err = brcmf_fil_iovar_data_set(ifp, "clmload", dload_buf,
+ struct_size(dload_buf, data, len));
return err;
}
@@ -139,7 +139,8 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
return 0;
}
- chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
+ chunk_buf = kzalloc(struct_size(chunk_buf, data, MAX_CHUNK_LEN),
+ GFP_KERNEL);
if (!chunk_buf) {
err = -ENOMEM;
goto done;
@@ -305,8 +306,12 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
brcmf_info("Firmware: %s %s\n", ri->chipname, buf);
/* locate firmware version number for ethtool */
- ptr = strrchr(buf, ' ') + 1;
- strscpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+ ptr = strrchr(buf, ' ');
+ if (!ptr) {
+ bphy_err(drvr, "Retrieving version number failed");
+ goto done;
+ }
+ strscpy(ifp->drvr->fwver, ptr + 1, sizeof(ifp->drvr->fwver));
/* Query for 'clmver' to get CLM version info from firmware */
memset(buf, 0, sizeof(buf));
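The clmload and chunk-buffer changes above use struct_size() from linux/overflow.h: once the trailing member is a true flexible array (the fwil_types.h hunk further below changes u8 data[1] to u8 data[]), struct_size(buf, data, len) computes sizeof(*buf) plus len elements with overflow checking, replacing the error-prone sizeof(*buf) + len - 1 arithmetic. A rough sketch with demo_dload as a stand-in type:

struct demo_dload {
	__le16 flag;
	__le32 len;
	u8 data[];		/* was: u8 data[1]; */
};

static struct demo_dload *demo_alloc(size_t payload_len)
{
	struct demo_dload *buf;

	/* allocates sizeof(*buf) + payload_len bytes, checked for overflow */
	buf = kzalloc(struct_size(buf, data, payload_len), GFP_KERNEL);
	return buf;
}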
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 595ae3ae561e..d354f79fd0ac 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1399,7 +1399,8 @@ void brcmf_fw_crashed(struct device *dev)
brcmf_dev_coredump(dev);
- schedule_work(&drvr->bus_reset);
+ if (drvr->bus_reset.func)
+ schedule_work(&drvr->bus_reset);
}
void brcmf_detach(struct device *dev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 340346c122d3..2e71b5c2a975 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -136,6 +136,7 @@ struct brcmf_pub {
struct work_struct bus_reset;
u8 clmver[BRCMF_DCMD_SMLEN];
+ u8 sta_mac_idx;
};
/* forward declarations */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 2c2f3e026c13..10bac865d724 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -143,7 +143,7 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
ifp->fwil_fwerr = true;
err = brcmf_fil_iovar_int_get(ifp, name, &data);
- if (err == 0) {
+ if (err != -BRCMF_FW_UNSUPPORTED) {
brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
ifp->drvr->feat_flags |= BIT(id);
} else {
@@ -281,6 +281,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_RSDB, "rsdb_mode");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_TDLS, "tdls_enable");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MFP, "mfp");
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_DUMP_OBSS, "dump_obss");
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
err = brcmf_fil_iovar_data_get(ifp, "pfn_macaddr", &pfn_mac,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index d1f4257af696..f1b086a69d73 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -29,6 +29,7 @@
* DOT11H: firmware supports 802.11h
* SAE: simultaneous authentication of equals
* FWAUTH: Firmware authenticator
+ * DUMP_OBSS: Firmware has capable to dump obss info to support ACS
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -51,7 +52,8 @@
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
BRCMF_FEAT_DEF(DOT11H) \
BRCMF_FEAT_DEF(SAE) \
- BRCMF_FEAT_DEF(FWAUTH)
+ BRCMF_FEAT_DEF(FWAUTH) \
+ BRCMF_FEAT_DEF(DUMP_OBSS)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index f2207793f6e2..09d2f2dc2b46 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -803,6 +803,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
u32 i, j;
char end = '\0';
+ if (chiprev >= BITS_PER_TYPE(u32)) {
+ brcmf_err("Invalid chip revision %u\n", chiprev);
+ return NULL;
+ }
+
for (i = 0; i < table_size; i++) {
if (mapping_table[i].chipid == chip &&
mapping_table[i].revmask & BIT(chiprev))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index f518e025d6e4..04e1beedfd81 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -943,7 +943,7 @@ struct brcmf_dload_data_le {
__le16 dload_type;
__le32 len;
__le32 crc;
- u8 data[1];
+ u8 data[];
};
/**
@@ -1049,7 +1049,7 @@ struct brcmf_gscan_config {
u8 count_of_channel_buckets;
u8 retry_threshold;
__le16 lost_ap_window;
- struct brcmf_gscan_bucket_config bucket[1];
+ struct brcmf_gscan_bucket_config bucket[];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 10d9d9c63b28..af8843507f3d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2424,8 +2424,12 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
brcmf_remove_interface(vif->ifp, true);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
- if (iftype != NL80211_IFTYPE_P2P_DEVICE)
- p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
+ if (vif == p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif)
+ p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+ if (vif == p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION2].vif)
+ p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION2].vif = NULL;
+ }
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 80083f9ea311..cf564adc612a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -12,6 +12,8 @@
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/kthread.h>
#include <linux/io.h>
#include <asm/unaligned.h>
@@ -340,6 +342,11 @@ struct brcmf_pciedev_info {
u16 value);
struct brcmf_mp_device *settings;
struct brcmf_otp_params otp;
+#ifdef DEBUG
+ u32 console_interval;
+ bool console_active;
+ struct timer_list timer;
+#endif
};
struct brcmf_pcie_ringbuf {
@@ -440,6 +447,9 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq);
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
+static void
+brcmf_pcie_fwcon_timer(struct brcmf_pciedev_info *devinfo, bool active);
+static void brcmf_pcie_debugfs_create(struct device *dev);
static u16
brcmf_pcie_read_reg16(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
@@ -1218,6 +1228,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
BRCMF_NROF_H2D_COMMON_MSGRINGS;
max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
}
+ if (max_flowrings > 256) {
+ brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
+ return -EIO;
+ }
if (devinfo->dma_idx_sz != 0) {
bufsz = (max_submissionrings + max_completionrings) *
@@ -1413,6 +1427,11 @@ fail:
static void brcmf_pcie_down(struct device *dev)
{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+
+ brcmf_pcie_fwcon_timer(devinfo, false);
}
static int brcmf_pcie_preinit(struct device *dev)
@@ -1547,6 +1566,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.get_memdump = brcmf_pcie_get_memdump,
.get_blob = brcmf_pcie_get_blob,
.reset = brcmf_pcie_reset,
+ .debugfs_create = brcmf_pcie_debugfs_create,
};
@@ -2048,13 +2068,14 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_commonring **flowrings;
u32 i, nvram_len;
+ bus = dev_get_drvdata(dev);
+ pcie_bus_dev = bus->bus_priv.pcie;
+ devinfo = pcie_bus_dev->devinfo;
+
/* check firmware loading result */
if (ret)
goto fail;
- bus = dev_get_drvdata(dev);
- pcie_bus_dev = bus->bus_priv.pcie;
- devinfo = pcie_bus_dev->devinfo;
brcmf_pcie_attach(devinfo);
fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
@@ -2123,9 +2144,14 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
brcmf_pcie_bus_console_read(devinfo, false);
+ brcmf_pcie_fwcon_timer(devinfo, true);
+
return;
fail:
+ brcmf_err(bus, "Dongle setup failed\n");
+ brcmf_pcie_bus_console_read(devinfo, true);
+ brcmf_fw_crashed(dev);
device_release_driver(dev);
}
@@ -2197,6 +2223,105 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
return fwreq;
}
+#ifdef DEBUG
+static void
+brcmf_pcie_fwcon_timer(struct brcmf_pciedev_info *devinfo, bool active)
+{
+ if (!active) {
+ if (devinfo->console_active) {
+ del_timer_sync(&devinfo->timer);
+ devinfo->console_active = false;
+ }
+ return;
+ }
+
+ /* don't start the timer */
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP ||
+ !devinfo->console_interval || !BRCMF_FWCON_ON())
+ return;
+
+ if (!devinfo->console_active) {
+ devinfo->timer.expires = jiffies + devinfo->console_interval;
+ add_timer(&devinfo->timer);
+ devinfo->console_active = true;
+ } else {
+ /* Reschedule the timer */
+ mod_timer(&devinfo->timer, jiffies + devinfo->console_interval);
+ }
+}
+
+static void
+brcmf_pcie_fwcon(struct timer_list *t)
+{
+ struct brcmf_pciedev_info *devinfo = from_timer(devinfo, t, timer);
+
+ if (!devinfo->console_active)
+ return;
+
+ brcmf_pcie_bus_console_read(devinfo, false);
+
+ /* Reschedule the timer if console interval is not zero */
+ mod_timer(&devinfo->timer, jiffies + devinfo->console_interval);
+}
+
+static int brcmf_pcie_console_interval_get(void *data, u64 *val)
+{
+ struct brcmf_pciedev_info *devinfo = data;
+
+ *val = devinfo->console_interval;
+
+ return 0;
+}
+
+static int brcmf_pcie_console_interval_set(void *data, u64 val)
+{
+ struct brcmf_pciedev_info *devinfo = data;
+
+ if (val > MAX_CONSOLE_INTERVAL)
+ return -EINVAL;
+
+ devinfo->console_interval = val;
+
+ if (!val && devinfo->console_active)
+ brcmf_pcie_fwcon_timer(devinfo, false);
+ else if (val)
+ brcmf_pcie_fwcon_timer(devinfo, true);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(brcmf_pcie_console_interval_fops,
+ brcmf_pcie_console_interval_get,
+ brcmf_pcie_console_interval_set,
+ "%llu\n");
+
+static void brcmf_pcie_debugfs_create(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+ struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
+
+ if (IS_ERR_OR_NULL(dentry))
+ return;
+
+ devinfo->console_interval = BRCMF_CONSOLE;
+
+ debugfs_create_file("console_interval", 0644, dentry, devinfo,
+ &brcmf_pcie_console_interval_fops);
+}
+
+#else
+void brcmf_pcie_fwcon_timer(struct brcmf_pciedev_info *devinfo, bool active)
+{
+}
+
+static void brcmf_pcie_debugfs_create(struct device *dev)
+{
+}
+#endif
+
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -2278,6 +2403,11 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_brcmf;
}
+#ifdef DEBUG
+ /* Set up the fwcon timer */
+ timer_setup(&devinfo->timer, brcmf_pcie_fwcon, 0);
+#endif
+
fwreq = brcmf_pcie_prepare_fw_request(devinfo);
if (!fwreq) {
ret = -ENOMEM;
@@ -2323,6 +2453,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
devinfo = bus->bus_priv.pcie->devinfo;
brcmf_pcie_bus_console_read(devinfo, false);
+ brcmf_pcie_fwcon_timer(devinfo, false);
devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
if (devinfo->ci)
@@ -2366,6 +2497,7 @@ static int brcmf_pcie_pm_enter_D3(struct device *dev)
bus = dev_get_drvdata(dev);
devinfo = bus->bus_priv.pcie->devinfo;
+ brcmf_pcie_fwcon_timer(devinfo, false);
brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
devinfo->mbdata_completed = false;
@@ -2409,6 +2541,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
brcmf_bus_change_state(bus, BRCMF_BUS_UP);
brcmf_pcie_intr_enable(devinfo);
brcmf_pcie_hostready(devinfo);
+ brcmf_pcie_fwcon_timer(devinfo, true);
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 170c61c8136c..05f66ab13bed 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -405,7 +405,7 @@ static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
if (n_buckets < 0)
return n_buckets;
- gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
+ gsz = struct_size(gscan_cfg, bucket, n_buckets);
gscan_cfg = kzalloc(gsz, GFP_KERNEL);
if (!gscan_cfg) {
err = -ENOMEM;
@@ -434,8 +434,8 @@ static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;
gscan_cfg->count_of_channel_buckets = n_buckets;
- memcpy(&gscan_cfg->bucket[0], buckets,
- n_buckets * sizeof(*buckets));
+ memcpy(gscan_cfg->bucket, buckets,
+ array_size(n_buckets, sizeof(*buckets)));
err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 465d95d83759..244ba48cc304 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -135,8 +135,6 @@ struct rte_console {
#define BRCMF_FIRSTREAD (1 << 6)
-#define BRCMF_CONSOLE 10 /* watchdog interval to poll console */
-
/* SBSDIO_DEVICE_CTL */
/* 1: device will assert busy signal when receiving CMD53 */
@@ -1886,7 +1884,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
}
rd->len_left = rd->len;
- /* read header first for unknow frame length */
+ /* read header first for unknown frame length */
sdio_claim_host(bus->sdiodev->func1);
if (!rd->len) {
ret = brcmf_sdiod_recv_buf(bus->sdiodev,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
index c1b9ac692d26..9540a05247c2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
@@ -63,9 +63,6 @@ int brcms_led_register(struct brcms_info *wl)
int hwnum = -1;
enum gpio_lookup_flags lflags = GPIO_ACTIVE_HIGH;
- if (!bcma_gpio || !gpio_is_valid(bcma_gpio->base))
- return -ENODEV;
-
/* find radio enabled LED */
for (i = 0; i < BRCMS_LED_NO; i++) {
u8 led = *leds[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index a4034d44609b..a8333e6adbda 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -962,6 +962,7 @@ static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
static const struct ieee80211_ops brcms_ops = {
.tx = brcms_ops_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = brcms_ops_start,
.stop = brcms_ops_stop,
.add_interface = brcms_ops_add_interface,
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index fb2c35bd73bb..7c4cc5f5e1eb 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -4807,7 +4807,8 @@ static int get_dec_u16(char *buffer, int *start, int limit)
}
static int airo_config_commit(struct net_device *dev,
- struct iw_request_info *info, void *zwrq,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
char *extra);
static inline int sniffing_mode(struct airo_info *ai)
@@ -5814,10 +5815,10 @@ static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid)
*/
static int airo_get_name(struct net_device *dev,
struct iw_request_info *info,
- char *cwrq,
+ union iwreq_data *cwrq,
char *extra)
{
- strcpy(cwrq, "IEEE 802.11-DS");
+ strcpy(cwrq->name, "IEEE 802.11-DS");
return 0;
}
@@ -5827,9 +5828,10 @@ static int airo_get_name(struct net_device *dev,
*/
static int airo_set_freq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *fwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_freq *fwrq = &wrqu->freq;
struct airo_info *local = dev->ml_priv;
int rc = -EINPROGRESS; /* Call commit handler */
@@ -5868,9 +5870,10 @@ static int airo_set_freq(struct net_device *dev,
*/
static int airo_get_freq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *fwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_freq *fwrq = &wrqu->freq;
struct airo_info *local = dev->ml_priv;
StatusRid status_rid; /* Card status info */
int ch;
@@ -5900,9 +5903,10 @@ static int airo_get_freq(struct net_device *dev,
*/
static int airo_set_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->essid;
struct airo_info *local = dev->ml_priv;
SsidRid SSID_rid; /* SSIDs */
@@ -5945,9 +5949,10 @@ static int airo_set_essid(struct net_device *dev,
*/
static int airo_get_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->essid;
struct airo_info *local = dev->ml_priv;
StatusRid status_rid; /* Card status info */
@@ -5973,9 +5978,10 @@ static int airo_get_essid(struct net_device *dev,
*/
static int airo_set_wap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *awrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct sockaddr *awrq = &wrqu->ap_addr;
struct airo_info *local = dev->ml_priv;
Cmd cmd;
Resp rsp;
@@ -6008,9 +6014,10 @@ static int airo_set_wap(struct net_device *dev,
*/
static int airo_get_wap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *awrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct sockaddr *awrq = &wrqu->ap_addr;
struct airo_info *local = dev->ml_priv;
StatusRid status_rid; /* Card status info */
@@ -6029,9 +6036,10 @@ static int airo_get_wap(struct net_device *dev,
*/
static int airo_set_nick(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->data;
struct airo_info *local = dev->ml_priv;
/* Check the size of the string */
@@ -6052,9 +6060,10 @@ static int airo_set_nick(struct net_device *dev,
*/
static int airo_get_nick(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->data;
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
@@ -6071,9 +6080,10 @@ static int airo_get_nick(struct net_device *dev,
*/
static int airo_set_rate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->bitrate;
struct airo_info *local = dev->ml_priv;
CapabilityRid cap_rid; /* Card capability info */
u8 brate = 0;
@@ -6141,9 +6151,10 @@ static int airo_set_rate(struct net_device *dev,
*/
static int airo_get_rate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->bitrate;
struct airo_info *local = dev->ml_priv;
StatusRid status_rid; /* Card status info */
@@ -6163,9 +6174,10 @@ static int airo_get_rate(struct net_device *dev,
*/
static int airo_set_rts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->rts;
struct airo_info *local = dev->ml_priv;
int rthr = vwrq->value;
@@ -6187,9 +6199,10 @@ static int airo_set_rts(struct net_device *dev,
*/
static int airo_get_rts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->rts;
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
@@ -6206,9 +6219,9 @@ static int airo_get_rts(struct net_device *dev,
*/
static int airo_set_frag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *vwrq = &wrqu->frag;
struct airo_info *local = dev->ml_priv;
int fthr = vwrq->value;
@@ -6231,9 +6244,10 @@ static int airo_set_frag(struct net_device *dev,
*/
static int airo_get_frag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->frag;
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
@@ -6250,9 +6264,10 @@ static int airo_get_frag(struct net_device *dev,
*/
static int airo_set_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 *uwrq,
+ union iwreq_data *uwrq,
char *extra)
{
+ __u32 mode = uwrq->mode;
struct airo_info *local = dev->ml_priv;
int reset = 0;
@@ -6260,7 +6275,7 @@ static int airo_set_mode(struct net_device *dev,
if (sniffing_mode(local))
reset = 1;
- switch(*uwrq) {
+ switch (mode) {
case IW_MODE_ADHOC:
local->config.opmode &= ~MODE_CFG_MASK;
local->config.opmode |= MODE_STA_IBSS;
@@ -6313,7 +6328,7 @@ static int airo_set_mode(struct net_device *dev,
*/
static int airo_get_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 *uwrq,
+ union iwreq_data *uwrq,
char *extra)
{
struct airo_info *local = dev->ml_priv;
@@ -6322,16 +6337,16 @@ static int airo_get_mode(struct net_device *dev,
/* If not managed, assume it's ad-hoc */
switch (local->config.opmode & MODE_CFG_MASK) {
case MODE_STA_ESS:
- *uwrq = IW_MODE_INFRA;
+ uwrq->mode = IW_MODE_INFRA;
break;
case MODE_AP:
- *uwrq = IW_MODE_MASTER;
+ uwrq->mode = IW_MODE_MASTER;
break;
case MODE_AP_RPTR:
- *uwrq = IW_MODE_REPEAT;
+ uwrq->mode = IW_MODE_REPEAT;
break;
default:
- *uwrq = IW_MODE_ADHOC;
+ uwrq->mode = IW_MODE_ADHOC;
}
return 0;
@@ -6348,9 +6363,10 @@ static inline int valid_index(struct airo_info *ai, int index)
*/
static int airo_set_encode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->encoding;
struct airo_info *local = dev->ml_priv;
int perm = (dwrq->flags & IW_ENCODE_TEMP ? 0 : 1);
__le16 currentAuthType = local->config.authType;
@@ -6447,9 +6463,10 @@ static int airo_set_encode(struct net_device *dev,
*/
static int airo_get_encode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->encoding;
struct airo_info *local = dev->ml_priv;
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
int wep_key_len;
@@ -6794,9 +6811,10 @@ static int airo_get_auth(struct net_device *dev,
*/
static int airo_set_txpow(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->txpower;
struct airo_info *local = dev->ml_priv;
CapabilityRid cap_rid; /* Card capability info */
int i;
@@ -6831,9 +6849,10 @@ static int airo_set_txpow(struct net_device *dev,
*/
static int airo_get_txpow(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->txpower;
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
@@ -6851,9 +6870,10 @@ static int airo_get_txpow(struct net_device *dev,
*/
static int airo_set_retry(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->retry;
struct airo_info *local = dev->ml_priv;
int rc = -EINVAL;
@@ -6889,9 +6909,10 @@ static int airo_set_retry(struct net_device *dev,
*/
static int airo_get_retry(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->retry;
struct airo_info *local = dev->ml_priv;
vwrq->disabled = 0; /* Can't be disabled */
@@ -6920,9 +6941,10 @@ static int airo_get_retry(struct net_device *dev,
*/
static int airo_get_range(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->data;
struct airo_info *local = dev->ml_priv;
struct iw_range *range = (struct iw_range *) extra;
CapabilityRid cap_rid; /* Card capability info */
@@ -7046,9 +7068,9 @@ static int airo_get_range(struct net_device *dev,
*/
static int airo_set_power(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *vwrq = &wrqu->power;
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
@@ -7104,9 +7126,10 @@ static int airo_set_power(struct net_device *dev,
*/
static int airo_get_power(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->power;
struct airo_info *local = dev->ml_priv;
__le16 mode;
@@ -7135,9 +7158,10 @@ static int airo_get_power(struct net_device *dev,
*/
static int airo_set_sens(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->sens;
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
@@ -7154,9 +7178,10 @@ static int airo_set_sens(struct net_device *dev,
*/
static int airo_get_sens(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *vwrq = &wrqu->sens;
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
@@ -7174,9 +7199,10 @@ static int airo_get_sens(struct net_device *dev,
*/
static int airo_get_aplist(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->data;
struct airo_info *local = dev->ml_priv;
struct sockaddr *address = (struct sockaddr *) extra;
struct iw_quality *qual;
@@ -7252,7 +7278,7 @@ static int airo_get_aplist(struct net_device *dev,
*/
static int airo_set_scan(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
struct airo_info *ai = dev->ml_priv;
@@ -7483,9 +7509,10 @@ static inline char *airo_translate_scan(struct net_device *dev,
*/
static int airo_get_scan(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *dwrq = &wrqu->data;
struct airo_info *ai = dev->ml_priv;
BSSListElement *net;
int err = 0;
@@ -7527,7 +7554,7 @@ out:
*/
static int airo_config_commit(struct net_device *dev,
struct iw_request_info *info, /* NULL */
- void *zwrq, /* NULL */
+ union iwreq_data *wrqu, /* NULL */
char *extra) /* NULL */
{
struct airo_info *local = dev->ml_priv;
@@ -7577,61 +7604,46 @@ static const struct iw_priv_args airo_private_args[] = {
static const iw_handler airo_handler[] =
{
- (iw_handler) airo_config_commit, /* SIOCSIWCOMMIT */
- (iw_handler) airo_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) airo_set_freq, /* SIOCSIWFREQ */
- (iw_handler) airo_get_freq, /* SIOCGIWFREQ */
- (iw_handler) airo_set_mode, /* SIOCSIWMODE */
- (iw_handler) airo_get_mode, /* SIOCGIWMODE */
- (iw_handler) airo_set_sens, /* SIOCSIWSENS */
- (iw_handler) airo_get_sens, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) airo_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
- iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
- (iw_handler) airo_set_wap, /* SIOCSIWAP */
- (iw_handler) airo_get_wap, /* SIOCGIWAP */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) airo_get_aplist, /* SIOCGIWAPLIST */
- (iw_handler) airo_set_scan, /* SIOCSIWSCAN */
- (iw_handler) airo_get_scan, /* SIOCGIWSCAN */
- (iw_handler) airo_set_essid, /* SIOCSIWESSID */
- (iw_handler) airo_get_essid, /* SIOCGIWESSID */
- (iw_handler) airo_set_nick, /* SIOCSIWNICKN */
- (iw_handler) airo_get_nick, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) airo_set_rate, /* SIOCSIWRATE */
- (iw_handler) airo_get_rate, /* SIOCGIWRATE */
- (iw_handler) airo_set_rts, /* SIOCSIWRTS */
- (iw_handler) airo_get_rts, /* SIOCGIWRTS */
- (iw_handler) airo_set_frag, /* SIOCSIWFRAG */
- (iw_handler) airo_get_frag, /* SIOCGIWFRAG */
- (iw_handler) airo_set_txpow, /* SIOCSIWTXPOW */
- (iw_handler) airo_get_txpow, /* SIOCGIWTXPOW */
- (iw_handler) airo_set_retry, /* SIOCSIWRETRY */
- (iw_handler) airo_get_retry, /* SIOCGIWRETRY */
- (iw_handler) airo_set_encode, /* SIOCSIWENCODE */
- (iw_handler) airo_get_encode, /* SIOCGIWENCODE */
- (iw_handler) airo_set_power, /* SIOCSIWPOWER */
- (iw_handler) airo_get_power, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCSIWGENIE */
- (iw_handler) NULL, /* SIOCGIWGENIE */
- (iw_handler) airo_set_auth, /* SIOCSIWAUTH */
- (iw_handler) airo_get_auth, /* SIOCGIWAUTH */
- (iw_handler) airo_set_encodeext, /* SIOCSIWENCODEEXT */
- (iw_handler) airo_get_encodeext, /* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
+ IW_HANDLER(SIOCSIWCOMMIT, airo_config_commit),
+ IW_HANDLER(SIOCGIWNAME, airo_get_name),
+ IW_HANDLER(SIOCSIWFREQ, airo_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, airo_get_freq),
+ IW_HANDLER(SIOCSIWMODE, airo_set_mode),
+ IW_HANDLER(SIOCGIWMODE, airo_get_mode),
+ IW_HANDLER(SIOCSIWSENS, airo_set_sens),
+ IW_HANDLER(SIOCGIWSENS, airo_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, airo_get_range),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, airo_set_wap),
+ IW_HANDLER(SIOCGIWAP, airo_get_wap),
+ IW_HANDLER(SIOCGIWAPLIST, airo_get_aplist),
+ IW_HANDLER(SIOCSIWSCAN, airo_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, airo_get_scan),
+ IW_HANDLER(SIOCSIWESSID, airo_set_essid),
+ IW_HANDLER(SIOCGIWESSID, airo_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, airo_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, airo_get_nick),
+ IW_HANDLER(SIOCSIWRATE, airo_set_rate),
+ IW_HANDLER(SIOCGIWRATE, airo_get_rate),
+ IW_HANDLER(SIOCSIWRTS, airo_set_rts),
+ IW_HANDLER(SIOCGIWRTS, airo_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, airo_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, airo_get_frag),
+ IW_HANDLER(SIOCSIWTXPOW, airo_set_txpow),
+ IW_HANDLER(SIOCGIWTXPOW, airo_get_txpow),
+ IW_HANDLER(SIOCSIWRETRY, airo_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, airo_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, airo_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, airo_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, airo_set_power),
+ IW_HANDLER(SIOCGIWPOWER, airo_get_power),
+ IW_HANDLER(SIOCSIWAUTH, airo_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, airo_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, airo_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, airo_get_encodeext),
};
/* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here.
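The airo conversion above moves every handler to the canonical iw_handler prototype taking a union iwreq_data, which is what lets the dispatch table use IW_HANDLER() entries instead of positional casts and hole slots. A minimal sketch of that handler shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/iw_handler.h>

static int demo_get_name(struct net_device *dev,
			 struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	/* pick the union member that matches the ioctl */
	strscpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
	return 0;
}

static int demo_set_freq(struct net_device *dev,
			 struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	struct iw_freq *fwrq = &wrqu->freq;	/* same idiom as the patch */

	return fwrq->e ? -EOPNOTSUPP : 0;	/* illustrative only */
}

/* ordering no longer matters and no casts are needed */
static const iw_handler demo_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, demo_get_name),
	IW_HANDLER(SIOCSIWFREQ, demo_set_freq),
};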
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 5b483de18c81..ca802af8cddc 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -2995,20 +2995,6 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
spin_unlock_irqrestore(&priv->ieee->lock, flags);
}
-/*
- * Check that card is still alive.
- * Reads debug register from domain0.
- * If card is present, pre-defined value should
- * be found there.
- *
- * @param priv
- * @return 1 if card is present, 0 otherwise
- */
-static inline int ipw_alive(struct ipw_priv *priv)
-{
- return ipw_read32(priv, 0x90) == 0xd55555d5;
-}
-
/* timeout in msec, attempted in 10-msec quanta */
static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
int timeout)
@@ -9870,7 +9856,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
/* Rebase the WE IOCTLs to zero for the handler array */
static iw_handler ipw_wx_handlers[] = {
- IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+ IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname),
IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 7352d5b2095f..d7e99d50b287 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1202,8 +1202,6 @@ il3945_rx_handle(struct il_priv *il)
D_RX("r = %d, i = %d\n", r, i);
while (i != r) {
- int len;
-
rxb = rxq->queue[i];
/* If an RXB doesn't have a Rx queue slot associated with it,
@@ -1217,10 +1215,6 @@ il3945_rx_handle(struct il_priv *il)
PAGE_SIZE << il->hw_params.rx_page_order,
DMA_FROM_DEVICE);
pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
-
reclaim = il_need_reclaim(il, pkt);
/* Based on type of command response or notification,
@@ -3435,6 +3429,7 @@ static const struct attribute_group il3945_attribute_group = {
static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
.tx = il3945_mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il3945_mac_start,
.stop = il3945_mac_stop,
.add_interface = il_mac_add_interface,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 943de47170c7..721b4042b4bf 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6304,6 +6304,7 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
static const struct ieee80211_ops il4965_mac_ops = {
.tx = il4965_mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il4965_mac_start,
.stop = il4965_mac_stop,
.add_interface = il_mac_add_interface,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 110fda65bd21..ec6198f1b38c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -172,6 +172,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
BIT(NL80211_BAND_6GHZ),
};
+static const struct iwl_ht_params iwl_gl_a_ht_params = {
+ .stbc = false, /* we explicitly disable STBC for GL step A */
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
+ BIT(NL80211_BAND_6GHZ),
+};
+
#define IWL_DEVICE_22000_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
@@ -249,7 +256,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
-#define IWL_DEVICE_BZ \
+#define IWL_DEVICE_BZ_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.led_mode = IWL_LED_RF_STATE, \
@@ -261,12 +268,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dccm2_len = IWL_22000_DCCM2_LEN, \
.smem_offset = IWL_22000_SMEM_OFFSET, \
.smem_len = IWL_22000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \
.apmg_not_supported = true, \
.trans.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = 0x30, \
- .ht_params = &iwl_22000_ht_params, \
.nvm_ver = IWL_22000_NVM_VERSION, \
.trans.use_tfh = true, \
.trans.rf_id = true, \
@@ -313,6 +318,14 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
+#define IWL_DEVICE_BZ \
+ IWL_DEVICE_BZ_COMMON, \
+ .ht_params = &iwl_22000_ht_params
+
+#define IWL_DEVICE_GL_A \
+ IWL_DEVICE_BZ_COMMON, \
+ .ht_params = &iwl_gl_a_ht_params
+
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
@@ -901,6 +914,7 @@ const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = {
.fw_name_pre = IWL_BZ_A_HR_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -908,6 +922,7 @@ const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = {
.fw_name_pre = IWL_BZ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -915,6 +930,7 @@ const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = {
.fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -922,6 +938,7 @@ const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = {
.fw_name_pre = IWL_BZ_A_MR_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -929,6 +946,7 @@ const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = {
.fw_name_pre = IWL_BZ_A_FM_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -936,13 +954,15 @@ const struct iwl_cfg iwl_cfg_bz_a0_fm4_a0 = {
.fw_name_pre = IWL_BZ_A_FM4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.fw_name_pre = IWL_GL_A_FM_A_FW_PRE,
.uhb_supported = true,
- IWL_DEVICE_BZ,
+ IWL_DEVICE_GL_A,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -950,6 +970,7 @@ const struct iwl_cfg iwl_cfg_gl_b0_fm_b0 = {
.fw_name_pre = IWL_GL_B_FM_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -957,6 +978,7 @@ const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
.fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -964,6 +986,7 @@ const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = {
.fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -971,6 +994,7 @@ const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = {
.fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -978,6 +1002,7 @@ const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = {
.fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -985,6 +1010,7 @@ const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = {
.fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -992,6 +1018,7 @@ const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
.fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
@@ -999,6 +1026,7 @@ const struct iwl_cfg iwl_cfg_bnj_b0_fm_b0 = {
.fw_name_pre = IWL_BNJ_B_FM_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index f4070fddc8c7..b1939ff275b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1571,6 +1571,7 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
const struct ieee80211_ops iwlagn_hw_ops = {
.tx = iwlagn_mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = iwlagn_mac_start,
.stop = iwlagn_mac_stop,
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 43619acc29fd..d07982d8c897 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -72,6 +72,11 @@ enum iwl_data_path_subcmd_ids {
SCD_QUEUE_CONFIG_CMD = 0x17,
/**
+ * @SEC_KEY_CMD: security key command, uses &struct iwl_sec_key_cmd
+ */
+ SEC_KEY_CMD = 0x18,
+
+ /**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
@@ -403,4 +408,78 @@ struct iwl_scd_queue_cfg_cmd {
} __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
+/**
+ * enum iwl_sec_key_flags - security key command key flags
+ * @IWL_SEC_KEY_FLAG_CIPHER_MASK: cipher mask
+ * @IWL_SEC_KEY_FLAG_CIPHER_WEP: WEP cipher
+ * @IWL_SEC_KEY_FLAG_CIPHER_CCMP: CCMP/CMAC cipher
+ * @IWL_SEC_KEY_FLAG_CIPHER_TKIP: TKIP cipher
+ * @IWL_SEC_KEY_FLAG_CIPHER_GCMP: GCMP/GMAC cipher
+ * @IWL_SEC_KEY_FLAG_NO_TX: don't install for TX
+ * @IWL_SEC_KEY_FLAG_KEY_SIZE: large key size (WEP-104, GCMP-256, GMAC-256)
+ * @IWL_SEC_KEY_FLAG_MFP: MFP is in use for this key
+ * @IWL_SEC_KEY_FLAG_MCAST_KEY: this is a multicast key
+ * @IWL_SEC_KEY_FLAG_SPP_AMSDU: SPP A-MSDU should be used
+ */
+enum iwl_sec_key_flags {
+ IWL_SEC_KEY_FLAG_CIPHER_MASK = 0x07,
+ IWL_SEC_KEY_FLAG_CIPHER_WEP = 0x01,
+ IWL_SEC_KEY_FLAG_CIPHER_CCMP = 0x02,
+ IWL_SEC_KEY_FLAG_CIPHER_TKIP = 0x03,
+ IWL_SEC_KEY_FLAG_CIPHER_GCMP = 0x05,
+ IWL_SEC_KEY_FLAG_NO_TX = 0x08,
+ IWL_SEC_KEY_FLAG_KEY_SIZE = 0x10,
+ IWL_SEC_KEY_FLAG_MFP = 0x20,
+ IWL_SEC_KEY_FLAG_MCAST_KEY = 0x40,
+ IWL_SEC_KEY_FLAG_SPP_AMSDU = 0x80,
+};
+
+#define IWL_SEC_WEP_KEY_OFFSET 3
+
+/**
+ * struct iwl_sec_key_cmd - security key command
+ * @action: action from &enum iwl_ctxt_action
+ * @u.add.sta_mask: station mask for the new key
+ * @u.add.key_id: key ID (0-7) for the new key
+ * @u.add.key_flags: key flags per &enum iwl_sec_key_flags
+ * @u.add.key: key material. WEP keys should start from &IWL_SEC_WEP_KEY_OFFSET.
+ * @u.add.tkip_mic_rx_key: TKIP MIC RX key
+ * @u.add.tkip_mic_tx_key: TKIP MIC TX key
+ * @u.add.rx_seq: RX sequence counter value
+ * @u.add.tx_seq: TX sequence counter value
+ * @u.modify.old_sta_mask: old station mask
+ * @u.modify.new_sta_mask: new station mask
+ * @u.modify.key_id: key ID
+ * @u.modify.key_flags: new key flags
+ * @u.remove.sta_mask: station mask
+ * @u.remove.key_id: key ID
+ * @u.remove.key_flags: key flags
+ */
+struct iwl_sec_key_cmd {
+ __le32 action;
+ union {
+ struct {
+ __le32 sta_mask;
+ __le32 key_id;
+ __le32 key_flags;
+ u8 key[32];
+ u8 tkip_mic_rx_key[8];
+ u8 tkip_mic_tx_key[8];
+ __le64 rx_seq;
+ __le64 tx_seq;
+ } __packed add; /* SEC_KEY_ADD_CMD_API_S_VER_1 */
+ struct {
+ __le32 old_sta_mask;
+ __le32 new_sta_mask;
+ __le32 key_id;
+ __le32 key_flags;
+ } __packed modify; /* SEC_KEY_MODIFY_CMD_API_S_VER_1 */
+ struct {
+ __le32 sta_mask;
+ __le32 key_id;
+ __le32 key_flags;
+ } __packed remove; /* SEC_KEY_REMOVE_CMD_API_S_VER_1 */
+ } __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */
+} __packed; /* SEC_KEY_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_datapath_h__ */
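A hedged sketch of how the new SEC_KEY_CMD add operation could be filled in. Only struct iwl_sec_key_cmd and the iwl_sec_key_flags values come from the header above; the helper, its parameters, and the use of FW_CTXT_ACTION_ADD from the referenced enum iwl_ctxt_action are assumptions:

#include <linux/minmax.h>
#include <linux/string.h>
#include "fw/api/context.h"	/* enum iwl_ctxt_action (assumed location) */
#include "fw/api/datapath.h"	/* struct iwl_sec_key_cmd, from the hunk above */

static void demo_fill_sec_key_add(struct iwl_sec_key_cmd *cmd,
				  u32 sta_mask, u32 key_id,
				  const u8 *key, size_t key_len, bool mcast)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	cmd->u.add.sta_mask = cpu_to_le32(sta_mask);
	cmd->u.add.key_id = cpu_to_le32(key_id);
	cmd->u.add.key_flags = cpu_to_le32(IWL_SEC_KEY_FLAG_CIPHER_CCMP |
					   (mcast ? IWL_SEC_KEY_FLAG_MCAST_KEY : 0));
	memcpy(cmd->u.add.key, key, min(key_len, sizeof(cmd->u.add.key)));
}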
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 9b7caf968346..e3eda251c728 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -398,7 +398,7 @@ struct iwl_he_backoff_conf {
* @IWL_HE_PKT_EXT_64QAM: 64-QAM
* @IWL_HE_PKT_EXT_256QAM: 256-QAM
* @IWL_HE_PKT_EXT_1024QAM: 1024-QAM
- * @IWL_HE_PKT_EXT_RESERVED: reserved value
+ * @IWL_HE_PKT_EXT_4096QAM: 4096-QAM, for EHT only
* @IWL_HE_PKT_EXT_NONE: not defined
*/
enum iwl_he_pkt_ext_constellations {
@@ -408,7 +408,7 @@ enum iwl_he_pkt_ext_constellations {
IWL_HE_PKT_EXT_64QAM,
IWL_HE_PKT_EXT_256QAM,
IWL_HE_PKT_EXT_1024QAM,
- IWL_HE_PKT_EXT_RESERVED,
+ IWL_HE_PKT_EXT_4096QAM,
IWL_HE_PKT_EXT_NONE,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index e66f77924f83..2f7d8558becd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -13,10 +13,12 @@
#define PHY_BAND_6 (2)
/* Supported channel width, vary if there is VHT support */
-#define PHY_VHT_CHANNEL_MODE20 (0x0)
-#define PHY_VHT_CHANNEL_MODE40 (0x1)
-#define PHY_VHT_CHANNEL_MODE80 (0x2)
-#define PHY_VHT_CHANNEL_MODE160 (0x3)
+#define IWL_PHY_CHANNEL_MODE20 0x0
+#define IWL_PHY_CHANNEL_MODE40 0x1
+#define IWL_PHY_CHANNEL_MODE80 0x2
+#define IWL_PHY_CHANNEL_MODE160 0x3
+/* and 320 MHz for EHT */
+#define IWL_PHY_CHANNEL_MODE320 0x4
/*
* Control channel position:
@@ -24,20 +26,17 @@
* For VHT - bit-2 marks if the control is lower/upper relative to center-freq
* bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
* center_freq
- * |
- * 40Mhz |_______|_______|
- * 80Mhz |_______|_______|_______|_______|
- * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
- * code 011 010 001 000 | 100 101 110 111
+ * For EHT - bit-3 is used for extended distance
+ * |
+ * 40Mhz |____|____|
+ * 80Mhz |____|____|____|____|
+ * 160Mhz |____|____|____|____|____|____|____|____|
+ * 320MHz |____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|
+ * code 1011 1010 1001 1000 0011 0010 0001 0000 0100 0101 0110 0111 1100 1101 1110 1111
*/
-#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
-#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
-#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
-#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
-#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
-#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
-#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
-#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
+#define IWL_PHY_CTRL_POS_ABOVE 0x4
+#define IWL_PHY_CTRL_POS_OFFS_EXT 0x8
+#define IWL_PHY_CTRL_POS_OFFS_MSK 0x3
/*
* struct iwl_fw_channel_info_v1 - channel information
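The comment above encodes the control-channel position as: bits 1:0 distance from the center frequency, bit 2 above/below, and (for 320 MHz EHT) bit 3 extended distance. A hypothetical helper showing how a code in that table could be derived:

#include <linux/types.h>
#include "fw/api/phy-ctxt.h"	/* IWL_PHY_CTRL_POS_* from the hunk above */

/* idx_from_center counts 20 MHz control-channel slots outward from the
 * center frequency (0..7 for 320 MHz); above selects the upper half. */
static u8 demo_ctrl_pos(unsigned int idx_from_center, bool above)
{
	u8 pos = idx_from_center & IWL_PHY_CTRL_POS_OFFS_MSK;

	if (idx_from_center >= 4)	/* only reachable at 320 MHz */
		pos |= IWL_PHY_CTRL_POS_OFFS_EXT;
	if (above)
		pos |= IWL_PHY_CTRL_POS_ABOVE;

	return pos;	/* e.g. idx 1 below -> 0b0001, idx 4 above -> 0b1100 */
}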
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 687f804c46b7..ddacd5b45aea 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -36,14 +36,14 @@ enum iwl_tlc_mng_cfg_flags {
* @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
- * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
+ * @IWL_TLC_MNG_CH_WIDTH_320MHZ: 320MHZ channel
*/
enum iwl_tlc_mng_cfg_cw {
IWL_TLC_MNG_CH_WIDTH_20MHZ,
IWL_TLC_MNG_CH_WIDTH_40MHZ,
IWL_TLC_MNG_CH_WIDTH_80MHZ,
IWL_TLC_MNG_CH_WIDTH_160MHZ,
- IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
+ IWL_TLC_MNG_CH_WIDTH_320MHZ,
};
/**
@@ -64,8 +64,7 @@ enum iwl_tlc_mng_cfg_chains {
* @IWL_TLC_MNG_MODE_HT: enable HT
* @IWL_TLC_MNG_MODE_VHT: enable VHT
* @IWL_TLC_MNG_MODE_HE: enable HE
- * @IWL_TLC_MNG_MODE_INVALID: invalid value
- * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ * @IWL_TLC_MNG_MODE_EHT: enable EHT
*/
enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_CCK = 0,
@@ -74,8 +73,7 @@ enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_HT,
IWL_TLC_MNG_MODE_VHT,
IWL_TLC_MNG_MODE_HE,
- IWL_TLC_MNG_MODE_INVALID,
- IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+ IWL_TLC_MNG_MODE_EHT,
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index a835214611ce..e128d2e07f38 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -218,6 +218,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
type = "HT";
else if (format == RATE_MCS_HE_MSK)
type = "HE";
+ else if (format == RATE_MCS_EHT_MSK)
+ type = "EHT";
else
type = "Unknown"; /* shouldn't happen */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 3237d4b528b5..6d6c12999645 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -590,6 +590,9 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
+ } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
+ alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) {
+ return -EIO;
}
remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
@@ -789,7 +792,7 @@ static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
- i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
+ i < IWL_FW_INI_ALLOCATION_NUM; i++) {
ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
if (!ret)
dram_alloc = true;
@@ -1324,7 +1327,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
"WRT: removing allocation id %d from region id %d\n",
le32_to_cpu(reg->dram_alloc_id), i);
- failed_alloc &= ~le32_to_cpu(reg->dram_alloc_id);
+ failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
kfree(*active_reg);
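The one-line fix above is the classic bitmask-versus-index slip: failed_alloc is a bitmask keyed by allocation id, so clearing an entry needs BIT(id), not the raw id. A minimal sketch of the corrected form:

#include <linux/bits.h>
#include <linux/types.h>

/* clear the entry for one allocation id; ~alloc_id would clear the
 * wrong bits (e.g. id 3 would clear bits 0 and 1 instead of bit 3) */
static u32 demo_clear_failed_alloc(u32 failed_alloc, u32 alloc_id)
{
	return failed_alloc & ~BIT(alloc_id);
}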
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index a2203f661321..ab7065c93826 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1971,3 +1971,6 @@ MODULE_PARM_DESC(remove_when_gone,
module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
S_IRUGO);
MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
+
+module_param_named(disable_11be, iwlwifi_mod_params.disable_11be, bool, 0444);
+MODULE_PARM_DESC(disable_11be, "Disable EHT capabilities (default: false)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index 0efffb6eeb1e..baa643386018 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018 Intel Corporation
+ * Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#ifndef __iwl_eeprom_parse_h__
@@ -31,6 +31,7 @@ struct iwl_nvm_data {
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;
+ bool sku_cap_11be_enable;
u16 radio_cfg_type;
u8 radio_cfg_step;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index d0b4d02bdab9..1cf26ab4f488 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -62,6 +62,7 @@ enum iwl_uapsd_disable {
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
 * @enable_ini: enable new FW debug infrastructure (INI TLVs)
+ * @disable_11be: disable EHT capabilities, default = false.
*/
struct iwl_mod_params {
int swcrypto;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 9040da3dcce3..476b90f32626 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -546,7 +546,7 @@ static const u8 iwl_vendor_caps[] = {
0x00
};
-static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
+static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
{
.types_mask = BIT(NL80211_IFTYPE_STATION),
.he_cap = {
@@ -571,10 +571,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS |
IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX,
- .phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
@@ -631,6 +627,78 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
*/
.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+ .phy_cap_info[8] =
+ IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA |
+ IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA,
+ },
+
+ /* For all MCS and bandwidth, set 2 NSS for both Tx and
+ * Rx - note we don't set the only_20mhz, but due to this
+ * being a union, it gets set correctly anyway.
+ */
+ .eht_mcs_nss_supp = {
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x22,
+ .rx_tx_mcs11_max_nss = 0x22,
+ .rx_tx_mcs13_max_nss = 0x22,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x22,
+ .rx_tx_mcs11_max_nss = 0x22,
+ .rx_tx_mcs13_max_nss = 0x22,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x22,
+ .rx_tx_mcs11_max_nss = 0x22,
+ .rx_tx_mcs13_max_nss = 0x22,
+ },
+ },
+
+ /*
+ * PPE thresholds for NSS = 2, and RU index bitmap set
+ * to 0xc.
+ */
+ .eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
+ },
},
{
.types_mask = BIT(NL80211_IFTYPE_AP),
@@ -644,9 +712,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
- .phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
@@ -687,6 +752,49 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
*/
.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ },
+
+ /* For all MCS and bandwidth, set 2 NSS for both Tx and
+ * Rx - note we don't set the only_20mhz, but due to this
+ * being a union, it gets set correctly anyway.
+ */
+ .eht_mcs_nss_supp = {
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x22,
+ .rx_tx_mcs11_max_nss = 0x22,
+ .rx_tx_mcs13_max_nss = 0x22,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x22,
+ .rx_tx_mcs11_max_nss = 0x22,
+ .rx_tx_mcs13_max_nss = 0x22,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x22,
+ .rx_tx_mcs11_max_nss = 0x22,
+ .rx_tx_mcs13_max_nss = 0x22,
+ },
+ },
+
+ /*
+ * PPE thresholds for NSS = 2, and RU index bitmap set
+ * to 0xc.
+ */
+ .eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
+ },
},
};
@@ -738,6 +846,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
static void
iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
+ struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
struct ieee80211_sband_iftype_data *iftype_data,
u8 tx_chains, u8 rx_chains,
@@ -745,6 +854,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
{
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
+ if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
+ iftype_data->eht_cap.has_eht = false;
+
/* Advertise an A-MPDU exponent extension based on
* operating band
*/
@@ -755,9 +867,30 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
- if (is_ap && iwlwifi_mod_params.nvm_file)
+ switch (sband->band) {
+ case NL80211_BAND_2GHZ:
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] |=
+ u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ break;
+ case NL80211_BAND_6GHZ:
+ if (!is_ap || iwlwifi_mod_params.nvm_file)
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+ fallthrough;
+ case NL80211_BAND_5GHZ:
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (!is_ap || iwlwifi_mod_params.nvm_file)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
if ((tx_chains & rx_chains) == ANT_AB) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
@@ -765,19 +898,44 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
- if (!is_ap)
+ if (!is_ap) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_MAX_NC_2;
- } else if (!is_ap) {
- /* If not 2x2, we need to indicate 1x1 in the
- * Midamble RX Max NSTS - but not for AP mode
- */
- iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
- ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
- iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
- iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
- IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+
+ if (iftype_data->eht_cap.has_eht) {
+ /*
+ * Set the number of sounding dimensions for each
+ * bandwidth to 1 to indicate the maximal supported
+ * value of TXVECTOR parameter NUM_STS of 2
+ */
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] |= 0x49;
+
+ /*
+ * Set the MAX NC to 1 to indicate sounding feedback of
+ * 2 supported by the beamformee.
+ */
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] |= 0x10;
+ }
+ }
+ } else {
+ if (iftype_data->eht_cap.has_eht) {
+ struct ieee80211_eht_mcs_nss_supp *mcs_nss =
+ &iftype_data->eht_cap.eht_mcs_nss_supp;
+
+ memset(mcs_nss, 0x11, sizeof(*mcs_nss));
+ }
+
+ if (!is_ap) {
+ /* If not 2x2, we need to indicate 1x1 in the
+ * Midamble RX Max NSTS - but not for AP mode
+ */
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+ }
}
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
@@ -792,6 +950,29 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
break;
}
+ if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ iftype_data->eht_cap.has_eht) {
+ iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
+ ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
+ ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK);
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] &=
+ ~(IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP);
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[5] &=
+ ~IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[6] &=
+ ~(IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP);
+ }
+
if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BCAST_TWT;
@@ -816,8 +997,8 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
if (WARN_ON(sband->iftype_data))
return;
- BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_capa));
- BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_capa));
+ BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_eht_capa));
+ BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_eht_capa));
switch (sband->band) {
case NL80211_BAND_2GHZ:
@@ -832,13 +1013,13 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
return;
}
- memcpy(iftype_data, iwl_he_capa, sizeof(iwl_he_capa));
+ memcpy(iftype_data, iwl_he_eht_capa, sizeof(iwl_he_eht_capa));
sband->iftype_data = iftype_data;
- sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
+ sband->n_iftype_data = ARRAY_SIZE(iwl_he_eht_capa);
for (i = 0; i < sband->n_iftype_data; i++)
- iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i],
+ iwl_nvm_fixup_sband_iftd(trans, data, sband, &iftype_data[i],
tx_chains, rx_chains, fw);
iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);
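The 0x22 values above pack the maximum NSS per MCS range into one byte: Rx NSS in the low nibble, Tx NSS in the high nibble, so 0x22 advertises 2x2, and the memset(..., 0x11, ...) in the fixup drops the whole struct to 1x1 when only one chain is usable. A small decode sketch (the nibble masks are the IEEE80211_EHT_MCS_NSS_* definitions from linux/ieee80211.h):

#include <linux/bitfield.h>
#include <linux/ieee80211.h>

static void demo_decode_eht_nss(u8 packed, u8 *rx_nss, u8 *tx_nss)
{
	*rx_nss = u8_get_bits(packed, IEEE80211_EHT_MCS_NSS_RX);	/* low nibble */
	*tx_nss = u8_get_bits(packed, IEEE80211_EHT_MCS_NSS_TX);	/* high nibble */
}
/* demo_decode_eht_nss(0x22, &rx, &tx) -> rx = 2, tx = 2 */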
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 157d1f31c487..62ce116d3783 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -368,6 +368,7 @@ enum {
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
+#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
@@ -377,6 +378,7 @@ enum {
#define PREG_PRPH_WPROT_22000 0xA04D00
#define SB_MODIFY_CFG_FLAG 0xA03088
+#define SB_CFG_RESIDES_IN_OTP_MASK 0x10
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
#define UMAG_SB_CPU_1_STATUS 0xA038C0
@@ -500,4 +502,7 @@ enum {
#define REG_OTP_MINOR 0xA0333C
+#define WFPM_LMAC2_PD_NOTIFICATION 0xA033CC
+#define WFPM_LMAC2_PD_RE_READ BIT(31)
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index d659ccd065f7..32bd7f19f1d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1542,5 +1542,6 @@ void iwl_trans_free(struct iwl_trans *trans);
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
+void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
index 67122cfa2292..2b639eef595d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
@@ -220,6 +220,7 @@ struct iwl_mei_nvm {
/**
* enum iwl_mei_pairwise_cipher - cipher for UCAST key
* @IWL_MEI_CIPHER_NONE: none
+ * @IWL_MEI_CIPHER_TKIP: tkip
* @IWL_MEI_CIPHER_CCMP: ccmp
* @IWL_MEI_CIPHER_GCMP: gcmp
* @IWL_MEI_CIPHER_GCMP_256: gcmp 256
@@ -228,6 +229,7 @@ struct iwl_mei_nvm {
*/
enum iwl_mei_pairwise_cipher {
IWL_MEI_CIPHER_NONE = 0,
+ IWL_MEI_CIPHER_TKIP = 2,
IWL_MEI_CIPHER_CCMP = 4,
IWL_MEI_CIPHER_GCMP = 8,
IWL_MEI_CIPHER_GCMP_256 = 9,
@@ -446,9 +448,25 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
void iwl_mei_host_disassociated(void);
/**
- * iwl_mei_device_down() - must be called when the device is down
+ * iwl_mei_device_state() - must be called when the device changes up/down state
+ * @up: true if the device is up, false otherwise.
*/
-void iwl_mei_device_down(void);
+void iwl_mei_device_state(bool up);
+
+/**
+ * iwl_mei_pldr_req() - must be called before loading the fw
+ *
+ * Return: 0 if the PLDR flow was successful and the fw can be loaded, negative
+ * value otherwise.
+ */
+int iwl_mei_pldr_req(void);
+
+/**
+ * iwl_mei_alive_notif() - must be called when alive notification is received
+ * @success: true if received alive notification, false if waiting for the
+ * notification timed out.
+ */
+void iwl_mei_alive_notif(bool success);
#else
@@ -497,7 +515,13 @@ static inline void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_
static inline void iwl_mei_host_disassociated(void)
{}
-static inline void iwl_mei_device_down(void)
+static inline void iwl_mei_device_state(bool up)
+{}
+
+static inline int iwl_mei_pldr_req(void)
+{ return 0; }
+
+static inline void iwl_mei_alive_notif(bool success)
{}
#endif /* CONFIG_IWLMEI */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index 357f14626cf4..b89989b6399a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -147,9 +147,15 @@ struct iwl_mei_filters {
* to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
* flow.
* @link_prot_state: true when we are in link protection PASSIVE
+ * @device_down: true if the device is down. Used to remember to send
+ * CSME_OWNERSHIP_CONFIRMED when the driver is already down.
* @csa_throttle_end_wk: used when &csa_throttled is true
+ * @pldr_wq: the wait queue for PLDR flow
+ * @pldr_active: PLDR flow is in progress
* @data_q_lock: protects the access to the data queues which are
* accessed without the mutex.
+ * @netdev_work: used to defer registering and unregistering of the netdev to
+ * avoid taking the rtnl lock in the SAP message handlers.
* @sap_seq_no: the sequence number for the SAP messages
* @seq_no: the sequence number for the SAP messages
* @dbgfs_dir: the debugfs dir entry
@@ -167,8 +173,12 @@ struct iwl_mei {
bool csa_throttled;
bool csme_taking_ownership;
bool link_prot_state;
+ bool device_down;
struct delayed_work csa_throttle_end_wk;
+ wait_queue_head_t pldr_wq;
+ bool pldr_active;
spinlock_t data_q_lock;
+ struct work_struct netdev_work;
atomic_t sap_seq_no;
atomic_t seq_no;
@@ -588,13 +598,38 @@ static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
return res;
}
+static void iwl_mei_netdev_work(struct work_struct *wk)
+{
+ struct iwl_mei *mei =
+ container_of(wk, struct iwl_mei, netdev_work);
+ struct net_device *netdev;
+
+ /*
+ * First take rtnl and only then the mutex to avoid an ABBA
+ * with iwl_mei_set_netdev()
+ */
+ rtnl_lock();
+ mutex_lock(&iwl_mei_mutex);
+
+ netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
+ lockdep_is_held(&iwl_mei_mutex));
+ if (netdev) {
+ if (mei->amt_enabled)
+ netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
+ mei);
+ else
+ netdev_rx_handler_unregister(netdev);
+ }
+
+ mutex_unlock(&iwl_mei_mutex);
+ rtnl_unlock();
+}
+
static void
iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
const struct iwl_sap_me_msg_start_ok *rsp,
ssize_t len)
{
- struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
-
if (len != sizeof(*rsp)) {
dev_err(&cldev->dev,
"got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
@@ -613,13 +648,10 @@ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
mutex_lock(&iwl_mei_mutex);
set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
- /* wifi driver has registered already */
- if (iwl_mei_cache.ops) {
- iwl_mei_send_sap_msg(mei->cldev,
- SAP_MSG_NOTIF_WIFIDR_UP);
- iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
- }
-
+ /*
+ * We'll receive AMT_STATE SAP message in a bit and
+ * that will continue the flow
+ */
mutex_unlock(&iwl_mei_mutex);
}
@@ -712,6 +744,13 @@ static void iwl_mei_set_init_conf(struct iwl_mei *mei)
.val = cpu_to_le32(iwl_mei_cache.rf_kill),
};
+ /* wifi driver has registered already */
+ if (iwl_mei_cache.ops) {
+ iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_WIFIDR_UP);
+ iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
+ }
+
iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
if (iwl_mei_cache.conn_info) {
@@ -738,38 +777,23 @@ static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
const struct iwl_sap_msg_dw *dw)
{
struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
- struct net_device *netdev;
- /*
- * First take rtnl and only then the mutex to avoid an ABBA
- * with iwl_mei_set_netdev()
- */
- rtnl_lock();
mutex_lock(&iwl_mei_mutex);
- netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
- lockdep_is_held(&iwl_mei_mutex));
-
if (mei->amt_enabled == !!le32_to_cpu(dw->val))
goto out;
mei->amt_enabled = dw->val;
- if (mei->amt_enabled) {
- if (netdev)
- netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
-
+ if (mei->amt_enabled)
iwl_mei_set_init_conf(mei);
- } else {
- if (iwl_mei_cache.ops)
- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
- if (netdev)
- netdev_rx_handler_unregister(netdev);
- }
+ else if (iwl_mei_cache.ops)
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
+
+ schedule_work(&mei->netdev_work);
out:
mutex_unlock(&iwl_mei_mutex);
- rtnl_unlock();
}
static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
@@ -798,14 +822,18 @@ static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
mei->got_ownership = false;
- /*
- * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver
- * is finished taking the device down.
- */
- mei->csme_taking_ownership = true;
+ if (iwl_mei_cache.ops && !mei->device_down) {
+ /*
+ * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
+ * driver is finished taking the device down.
+ */
+ mei->csme_taking_ownership = true;
- if (iwl_mei_cache.ops)
- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
+ } else {
+ iwl_mei_send_sap_msg(cldev,
+ SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
+ }
}
static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
@@ -857,6 +885,15 @@ static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
}
+static void iwl_mei_handle_pldr_ack(struct mei_cl_device *cldev,
+ const struct iwl_sap_pldr_ack_data *ack)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+
+ mei->pldr_active = le32_to_cpu(ack->status) == SAP_PLDR_STATUS_SUCCESS;
+ wake_up_all(&mei->pldr_wq);
+}
+
static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
const struct iwl_sap_hdr *hdr)
{
@@ -937,6 +974,8 @@ static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
iwl_mei_handle_can_release_ownership, 0);
SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
iwl_mei_handle_csme_taking_ownership, 0);
+ SAP_MSG_HANDLER(PLDR_ACK, iwl_mei_handle_pldr_ack,
+ sizeof(struct iwl_sap_pldr_ack_data));
default:
/*
	 * This is not really an error, there are messages that we decided
@@ -1313,10 +1352,17 @@ out:
}
EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
-int iwl_mei_get_ownership(void)
+#define IWL_MEI_PLDR_NUM_RETRIES 3
+
+int iwl_mei_pldr_req(void)
{
struct iwl_mei *mei;
int ret;
+ struct iwl_sap_pldr_data msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ };
+ int i;
mutex_lock(&iwl_mei_mutex);
@@ -1338,22 +1384,34 @@ int iwl_mei_get_ownership(void)
goto out;
}
- if (mei->got_ownership) {
- ret = 0;
- goto out;
+ for (i = 0; i < IWL_MEI_PLDR_NUM_RETRIES; i++) {
+ ret = iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+ mutex_unlock(&iwl_mei_mutex);
+ if (ret)
+ return ret;
+
+ ret = wait_event_timeout(mei->pldr_wq, mei->pldr_active, HZ / 2);
+ if (ret)
+ break;
+
+ /* Take the mutex for the next iteration */
+ mutex_lock(&iwl_mei_mutex);
}
- ret = iwl_mei_send_sap_msg(mei->cldev,
- SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
if (ret)
- goto out;
+ return 0;
+ ret = -ETIMEDOUT;
+out:
mutex_unlock(&iwl_mei_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iwl_mei_pldr_req);
- ret = wait_event_timeout(mei->get_ownership_wq,
- mei->got_ownership, HZ / 2);
- if (!ret)
- return -ETIMEDOUT;
+int iwl_mei_get_ownership(void)
+{
+ struct iwl_mei *mei;
+ int ret;
mutex_lock(&iwl_mei_mutex);
@@ -1370,14 +1428,59 @@ int iwl_mei_get_ownership(void)
goto out;
}
- ret = !mei->got_ownership;
+ if (!mei->amt_enabled) {
+ ret = 0;
+ goto out;
+ }
+
+ if (mei->got_ownership) {
+ ret = 0;
+ goto out;
+ }
+ ret = iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
+ if (ret)
+ goto out;
+
+ mutex_unlock(&iwl_mei_mutex);
+
+ ret = wait_event_timeout(mei->get_ownership_wq,
+ mei->got_ownership, HZ / 2);
+ return (!ret) ? -ETIMEDOUT : 0;
out:
mutex_unlock(&iwl_mei_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
+void iwl_mei_alive_notif(bool success)
+{
+ struct iwl_mei *mei;
+ struct iwl_sap_pldr_end_data msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR_END),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ .status = success ? cpu_to_le32(SAP_PLDR_STATUS_SUCCESS) :
+ cpu_to_le32(SAP_PLDR_STATUS_FAILURE),
+ };
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ if (!mei || !mei->pldr_active)
+ goto out;
+
+ mei->pldr_active = false;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+out:
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_alive_notif);
+
void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
const struct iwl_mei_colloc_info *colloc_info)
{
@@ -1413,10 +1516,7 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
- if (!mei)
- goto out;
-
- if (!mei->amt_enabled)
+	if (!mei || !mei->amt_enabled)
goto out;
iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
@@ -1435,7 +1535,7 @@ void iwl_mei_host_disassociated(void)
struct iwl_sap_notif_host_link_down msg = {
.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
- .type = HOST_LINK_DOWN_TYPE_LONG,
+ .type = HOST_LINK_DOWN_TYPE_TEMPORARY,
};
mutex_lock(&iwl_mei_mutex);
@@ -1445,7 +1545,7 @@ void iwl_mei_host_disassociated(void)
mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
- if (!mei)
+	if (!mei || !mei->amt_enabled)
goto out;
iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
@@ -1481,7 +1581,7 @@ void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
- if (!mei)
+	if (!mei || !mei->amt_enabled)
goto out;
iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
@@ -1510,7 +1610,7 @@ void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
- if (!mei)
+	if (!mei || !mei->amt_enabled)
goto out;
iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
@@ -1538,7 +1638,7 @@ void iwl_mei_set_country_code(u16 mcc)
mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
- if (!mei)
+	if (!mei || !mei->amt_enabled)
goto out;
iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
@@ -1564,7 +1664,7 @@ void iwl_mei_set_power_limit(const __le16 *power_limit)
mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
- if (!mei)
+	if (!mei || !mei->amt_enabled)
goto out;
memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
@@ -1616,7 +1716,7 @@ out:
}
EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
-void iwl_mei_device_down(void)
+void iwl_mei_device_state(bool up)
{
struct iwl_mei *mei;
@@ -1630,7 +1730,9 @@ void iwl_mei_device_down(void)
if (!mei)
goto out;
- if (!mei->csme_taking_ownership)
+ mei->device_down = !up;
+
+ if (up || !mei->csme_taking_ownership)
goto out;
iwl_mei_send_sap_msg(mei->cldev,
@@ -1639,7 +1741,7 @@ void iwl_mei_device_down(void)
out:
mutex_unlock(&iwl_mei_mutex);
}
-EXPORT_SYMBOL_GPL(iwl_mei_device_down);
+EXPORT_SYMBOL_GPL(iwl_mei_device_state);
int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
{
@@ -1669,9 +1771,10 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
/* we have already a SAP connection */
if (iwl_mei_is_connected()) {
- iwl_mei_send_sap_msg(mei->cldev,
- SAP_MSG_NOTIF_WIFIDR_UP);
- ops->rfkill(priv, mei->link_prot_state);
+ if (mei->amt_enabled)
+ iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_WIFIDR_UP);
+ ops->rfkill(priv, mei->link_prot_state, false);
}
}
ret = 0;
@@ -1817,10 +1920,13 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
iwl_mei_csa_throttle_end_wk);
init_waitqueue_head(&mei->get_ownership_wq);
+ init_waitqueue_head(&mei->pldr_wq);
spin_lock_init(&mei->data_q_lock);
+ INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
+ mei->device_down = true;
do {
ret = iwl_mei_alloc_shared_mem(cldev);
@@ -1884,6 +1990,7 @@ free:
}
#define SEND_SAP_MAX_WAIT_ITERATION 10
+#define IWLMEI_DEVICE_DOWN_WAIT_ITERATION 50
static void iwl_mei_remove(struct mei_cl_device *cldev)
{
@@ -1894,8 +2001,26 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
* We are being removed while the bus is active, it means we are
* going to suspend/ shutdown, so the NIC will disappear.
*/
- if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
- iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
+ if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) {
+ unsigned int iter = IWLMEI_DEVICE_DOWN_WAIT_ITERATION;
+ bool down = false;
+
+ /*
+ * In case of suspend, wait for the mac to stop and don't remove
+ * the interface. This will allow the interface to come back
+ * on resume.
+ */
+ while (!down && iter--) {
+ mdelay(1);
+
+ mutex_lock(&iwl_mei_mutex);
+ down = mei->device_down;
+ mutex_unlock(&iwl_mei_mutex);
+ }
+
+ if (!down)
+ iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
+ }
if (rcu_access_pointer(iwl_mei_cache.netdev)) {
struct net_device *dev;
@@ -1921,29 +2046,32 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
mutex_lock(&iwl_mei_mutex);
- /*
- * Tell CSME that we are going down so that it won't access the
- * memory anymore, make sure this message goes through immediately.
- */
- mei->csa_throttled = false;
- iwl_mei_send_sap_msg(mei->cldev,
- SAP_MSG_NOTIF_HOST_GOES_DOWN);
+ if (mei->amt_enabled) {
+ /*
+ * Tell CSME that we are going down so that it won't access the
+ * memory anymore, make sure this message goes through immediately.
+ */
+ mei->csa_throttled = false;
+ iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_HOST_GOES_DOWN);
- for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
- if (!iwl_mei_host_to_me_data_pending(mei))
- break;
+ for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
+ if (!iwl_mei_host_to_me_data_pending(mei))
+ break;
- msleep(5);
- }
+ msleep(20);
+ }
- /*
- * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
- * it means that it will probably keep reading memory that we are going
- * to unmap and free, expect IOMMU error messages.
- */
- if (i == SEND_SAP_MAX_WAIT_ITERATION)
- dev_err(&mei->cldev->dev,
- "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
+ /*
+ * If we couldn't make sure that CSME saw the HOST_GOES_DOWN
+ * message, it means that it will probably keep reading memory
+ * that we are going to unmap and free, expect IOMMU error
+ * messages.
+ */
+ if (i == SEND_SAP_MAX_WAIT_ITERATION)
+ dev_err(&mei->cldev->dev,
+ "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
+ }
mutex_unlock(&iwl_mei_mutex);
@@ -1976,6 +2104,7 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
*/
cancel_work_sync(&mei->send_csa_msg_wk);
cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
+ cancel_work_sync(&mei->netdev_work);
/*
* If someone waits for the ownership, let him know that we are going
@@ -1983,6 +2112,7 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
* the device.
*/
wake_up_all(&mei->get_ownership_wq);
+ wake_up_all(&mei->pldr_wq);
mutex_lock(&iwl_mei_mutex);
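Taken together, the new exports implement a small handshake with CSME around firmware load: the op-mode asks for a product-level device reset with iwl_mei_pldr_req() (which retries the SAP_MSG_NOTIF_PLDR request up to IWL_MEI_PLDR_NUM_RETRIES times while waiting on pldr_wq for the PLDR_ACK), loads the firmware, and then reports the outcome with iwl_mei_alive_notif(), which sends SAP_MSG_NOTIF_PLDR_END. A minimal caller-side sketch, based on the mvm changes later in this patch (needs_pldr and load_fw_and_wait_alive() are placeholders, not real symbols):

	int ret;

	if (needs_pldr) {
		/* ask CSME to prepare for the product reset */
		ret = iwl_mei_pldr_req();
		if (ret)
			return -EBUSY;
	}

	ret = load_fw_and_wait_alive();	/* op-mode specific */

	/* end the PLDR flow: tell CSME whether the ALIVE notification arrived */
	iwl_mei_alive_notif(ret == 0);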
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 3472167c8370..eac46d1a397a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <uapi/linux/if_ether.h>
@@ -337,10 +337,14 @@ rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
if (!*pass_to_csme)
return RX_HANDLER_PASS;
- if (ret == RX_HANDLER_PASS)
+ if (ret == RX_HANDLER_PASS) {
skb = skb_copy(orig_skb, GFP_ATOMIC);
- else
+
+ if (!skb)
+ return RX_HANDLER_PASS;
+ } else {
skb = orig_skb;
+ }
/* CSME wants the MAC header as well, push it back */
skb_push(skb, skb->data - skb_mac_header(skb));
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
index be1456dea484..6c0ad4adbf32 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021 - 2022 Intel Corporation
*/
#ifndef __sap_h__
@@ -203,6 +203,7 @@ struct iwl_sap_me_msg_start_ok {
* @SAP_MSG_NOTIF_NIC_OWNER: Payload is a DW. See &enum iwl_sap_nic_owner.
* @SAP_MSG_NOTIF_CSME_CONN_STATUS: See &struct iwl_sap_notif_conn_status.
* @SAP_MSG_NOTIF_NVM: See &struct iwl_sap_nvm.
+ * @SAP_MSG_NOTIF_PLDR_ACK: See &struct iwl_sap_pldr_ack_data.
* @SAP_MSG_NOTIF_FROM_CSME_MAX: Not used.
*
* @SAP_MSG_NOTIF_FROM_HOST_MIN: Not used.
@@ -226,6 +227,8 @@ struct iwl_sap_me_msg_start_ok {
* @SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED: No payload.
* @SAP_MSG_NOTIF_SAR_LIMITS: See &struct iwl_sap_notif_sar_limits.
* @SAP_MSG_NOTIF_GET_NVM: No payload. Triggers %SAP_MSG_NOTIF_NVM.
+ * @SAP_MSG_NOTIF_PLDR: See &struct iwl_sap_pldr_data.
+ * @SAP_MSG_NOTIF_PLDR_END: See &struct iwl_sap_pldr_end_data.
* @SAP_MSG_NOTIF_FROM_HOST_MAX: Not used.
*
* @SAP_MSG_DATA_MIN: Not used.
@@ -258,6 +261,8 @@ enum iwl_sap_msg {
SAP_MSG_NOTIF_NIC_OWNER = 511,
SAP_MSG_NOTIF_CSME_CONN_STATUS = 512,
SAP_MSG_NOTIF_NVM = 513,
+ /* 514 - 517 not supported */
+ SAP_MSG_NOTIF_PLDR_ACK = 518,
SAP_MSG_NOTIF_FROM_CSME_MAX,
SAP_MSG_NOTIF_FROM_HOST_MIN = 1000,
@@ -279,6 +284,9 @@ enum iwl_sap_msg {
SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED = 1015,
SAP_MSG_NOTIF_SAR_LIMITS = 1016,
SAP_MSG_NOTIF_GET_NVM = 1017,
+ /* 1018 - 1023 not supported */
+ SAP_MSG_NOTIF_PLDR = 1024,
+ SAP_MSG_NOTIF_PLDR_END = 1025,
SAP_MSG_NOTIF_FROM_HOST_MAX,
SAP_MSG_DATA_MIN = 2000,
@@ -334,12 +342,14 @@ enum iwl_sap_wifi_auth_type {
/**
* enum iwl_sap_wifi_cipher_alg
* @SAP_WIFI_CIPHER_ALG_NONE: TBD
+ * @SAP_WIFI_CIPHER_ALG_TKIP: TBD
* @SAP_WIFI_CIPHER_ALG_CCMP: TBD
* @SAP_WIFI_CIPHER_ALG_GCMP: TBD
* @SAP_WIFI_CIPHER_ALG_GCMP_256: TBD
*/
enum iwl_sap_wifi_cipher_alg {
SAP_WIFI_CIPHER_ALG_NONE = IWL_MEI_CIPHER_NONE,
+ SAP_WIFI_CIPHER_ALG_TKIP = IWL_MEI_CIPHER_TKIP,
SAP_WIFI_CIPHER_ALG_CCMP = IWL_MEI_CIPHER_CCMP,
SAP_WIFI_CIPHER_ALG_GCMP = IWL_MEI_CIPHER_GCMP,
SAP_WIFI_CIPHER_ALG_GCMP_256 = IWL_MEI_CIPHER_GCMP_256,
@@ -730,4 +740,47 @@ struct iwl_sap_cb_data {
u8 payload[];
};
+/**
+ * struct iwl_sap_pldr_data - payload of %SAP_MSG_NOTIF_PLDR
+ * @hdr: The SAP header.
+ * @version: SAP message version
+ */
+struct iwl_sap_pldr_data {
+ struct iwl_sap_hdr hdr;
+ __le32 version;
+} __packed;
+
+/**
+ * enum iwl_sap_pldr_status - status values for the PLDR flow
+ * @SAP_PLDR_STATUS_SUCCESS: PLDR started/ended successfully
+ * @SAP_PLDR_STATUS_FAILURE: PLDR failed to start/end
+ */
+enum iwl_sap_pldr_status {
+ SAP_PLDR_STATUS_SUCCESS = 0,
+ SAP_PLDR_STATUS_FAILURE = 1,
+};
+
+/**
+ * struct iwl_sap_pldr_end_data - payload of %SAP_MSG_NOTIF_PLDR_END
+ * @hdr: The SAP header.
+ * @version: SAP message version
+ * @status: PLDR end status
+ */
+struct iwl_sap_pldr_end_data {
+ struct iwl_sap_hdr hdr;
+ __le32 version;
+ __le32 status;
+} __packed;
+
+/**
+ * struct iwl_sap_pldr_ack_data - payload of %SAP_MSG_NOTIF_PLDR_ACK
+ * @hdr: The SAP header.
+ * @version: SAP message version
+ * @status: CSME accepts/refuses the PLDR request
+ */
+struct iwl_sap_pldr_ack_data {
+ struct iwl_sap_hdr hdr;
+ __le32 version;
+ __le32 status;
+} __packed;
+
#endif /* __sap_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 11e814b7cad0..b28fcf0cf9cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -7,6 +7,7 @@ iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-y += ftm-responder.o ftm-initiator.o
iwlmvm-y += rfi.o
+iwlmvm-y += mld-key.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1e8123140973..1ce9450e5add 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1248,7 +1248,7 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
mvmvif = iwl_mvm_vif_from_mac80211(vif);
info = IEEE80211_SKB_CB(beacon);
- rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+ rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
beacon_cmd.flags =
cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index f041e77af059..0a41cd0ab243 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -354,6 +354,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
+
+ if (mvm->trans->trans_cfg->device_family ==
+ IWL_DEVICE_FAMILY_AX210) {
+ /* print these registers regardless of alive fail/success */
+ IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION));
+ IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION));
+ IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n",
+ iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG));
+ IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n",
+ iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9));
+ }
+
if (ret) {
struct iwl_trans *trans = mvm->trans;
@@ -390,7 +404,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
UREG_LMAC2_CURRENT_PC));
}
- if (ret == -ETIMEDOUT)
+ if (ret == -ETIMEDOUT && !mvm->pldr_sync)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
@@ -404,6 +418,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
+ /* if reached this point, Alive notification was received */
+ iwl_mei_alive_notif(true);
+
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
if (ret) {
IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
@@ -1456,6 +1473,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
struct ieee80211_channel *chan;
struct cfg80211_chan_def chandef;
struct ieee80211_supported_band *sband = NULL;
+ u32 sb_cfg;
lockdep_assert_held(&mvm->mutex);
@@ -1463,15 +1481,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
return ret;
+ sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
+ mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK);
+ if (mvm->pldr_sync && iwl_mei_pldr_req())
+ return -EBUSY;
+
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- if (ret != -ERFKILL)
+ if (ret != -ERFKILL && !mvm->pldr_sync)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}
+ /* FW loaded successfully */
+ mvm->pldr_sync = false;
+
iwl_get_shared_mem_conf(&mvm->fwrt);
ret = iwl_mvm_sf_update(mvm, NULL, false);
@@ -1665,6 +1691,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_rfi_send_config_cmd(mvm, NULL);
}
+ iwl_mvm_mei_device_state(mvm, true);
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index de0c545d50fd..83abfe996138 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -788,14 +788,40 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
return ie - beacon;
}
-u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
- struct ieee80211_vif *vif)
+static u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
{
+ struct ieee80211_supported_band *sband;
+ unsigned long basic = vif->bss_conf.basic_rates;
+ u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
u8 rate;
- if (info->band == NL80211_BAND_2GHZ && !vif->p2p)
- rate = IWL_FIRST_CCK_RATE;
- else
- rate = IWL_FIRST_OFDM_RATE;
+ u32 i;
+
+ sband = mvm->hw->wiphy->bands[info->band];
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+ u16 hw = sband->bitrates[i].hw_value;
+
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ if (lowest_ofdm > hw)
+ lowest_ofdm = hw;
+ } else if (lowest_cck > hw) {
+ lowest_cck = hw;
+ }
+ }
+
+ if (info->band == NL80211_BAND_2GHZ && !vif->p2p) {
+ if (lowest_cck != IWL_RATE_COUNT)
+ rate = lowest_cck;
+ else if (lowest_ofdm != IWL_RATE_COUNT)
+ rate = lowest_ofdm;
+ else
+ rate = IWL_RATE_1M_INDEX;
+ } else if (lowest_ofdm != IWL_RATE_COUNT) {
+ rate = lowest_ofdm;
+ } else {
+ rate = IWL_RATE_6M_INDEX;
+ }
return rate;
}
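A worked example with illustrative values: for a 2.4 GHz BSS whose basic-rates bitmap contains 1, 2, 5.5, 11, 6 and 12 Mbps, the loop leaves lowest_cck at the 1 Mbps entry's hw_value and lowest_ofdm at the 6 Mbps entry's; the non-P2P 2.4 GHz branch then returns the 1 Mbps value, while P2P (or a 5/6 GHz band) returns the 6 Mbps value. If the bitmap contains no usable rate at all, the IWL_RATE_1M_INDEX / IWL_RATE_6M_INDEX fallbacks apply.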
@@ -812,6 +838,24 @@ u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
return flags;
}
+u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband =
+ mvm->hw->wiphy->bands[info->band];
+ u32 legacy = vif->bss_conf.beacon_tx_rate.control[info->band].legacy;
+
+ /* if beacon rate was configured try using it */
+ if (hweight32(legacy) == 1) {
+ u32 rate = ffs(legacy) - 1;
+
+ return sband->bitrates[rate].hw_value;
+ }
+
+ return iwl_mvm_mac_ctxt_get_lowest_rate(mvm, info, vif);
+}
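In other words, when userspace pinned the beacon to exactly one legacy rate (hweight32(legacy) == 1), ffs(legacy) - 1 indexes the band's bitrate table and that entry's hw_value is used directly; any other configuration falls back to the lowest-basic-rate selection above. The NL80211_EXT_FEATURE_BEACON_RATE_LEGACY flag advertised later in this patch is what allows userspace to request such a fixed beacon rate.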
+
static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon,
@@ -842,7 +886,7 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
RATE_MCS_ANT_POS);
- rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+ rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
tx->rate_n_flags |=
cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate));
@@ -926,7 +970,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
struct iwl_mac_beacon_cmd beacon_cmd = {};
- u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+ u8 rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
u16 flags;
struct ieee80211_chanctx_conf *ctx;
int channel;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 8464c9b7baf1..5273ade71117 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -374,6 +374,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
wiphy_ext_feature_set(hw->wiphy,
@@ -1065,6 +1068,16 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
if (!ret)
break;
+ /*
+		 * In PLDR sync, PCI re-enumeration is needed. No point in retrying
+		 * mac start before that.
+ */
+ if (mvm->pldr_sync) {
+ iwl_mei_alive_notif(false);
+ iwl_trans_pcie_remove(mvm->trans, true);
+ break;
+ }
+
IWL_ERR(mvm, "mac start retry %d\n", retry);
}
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
@@ -1822,7 +1835,8 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
- u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
+ u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
+ bool inheritance)
{
int i;
@@ -1848,14 +1862,25 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
bw++) {
ru_index_tmp >>= 1;
- if (!(ru_index_tmp & 1))
- continue;
+ /*
+			 * According to the 11be spec, if the PPE Thresholds aren't present
+			 * for a specific BW, that BW should inherit the thresholds from the
+			 * last BW for which we had PPE Thresholds. In 11ax though, there is
+			 * no such inheritance - continue in this case
+ */
+ if (!(ru_index_tmp & 1)) {
+ if (inheritance)
+ goto set_thresholds;
+ else
+ continue;
+ }
high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+set_thresholds:
pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
}
@@ -1864,7 +1889,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
- struct iwl_he_pkt_ext_v2 *pkt_ext)
+ struct iwl_he_pkt_ext_v2 *pkt_ext,
+ bool inheritance)
{
u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
u8 *ppe = &sta->deflink.he_cap.ppe_thres[0];
@@ -1874,7 +1900,8 @@ static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
/* Starting after PPE header */
u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
- iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
+ iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit,
+ inheritance);
}
static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
@@ -1885,16 +1912,18 @@ static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *p
int high_th = -1;
int i;
+ /* all the macros are the same for EHT and HE */
switch (nominal_padding) {
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_NONE;
break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
low_th = IWL_HE_PKT_EXT_BPSK;
high_th = IWL_HE_PKT_EXT_NONE;
break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_BPSK;
break;
@@ -1917,6 +1946,31 @@ static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *p
}
}
+static void iwl_mvm_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding)
+{
+ int i;
+
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ u8 bw;
+
+ for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+ if (nominal_padding >
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
+ else if (nominal_padding ==
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[0] == IWL_HE_PKT_EXT_NONE &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
+ }
+ }
+}
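A worked example of what the helper does with the constants used above: for a row currently holding {PPET8 = IWL_HE_PKT_EXT_NONE, PPET16 = IWL_HE_PKT_EXT_NONE}, a nominal padding of 16 us or 20 us (anything greater than the _8US encoding) rewrites PPET16 to IWL_HE_PKT_EXT_4096QAM and leaves PPET8 alone, whereas a nominal padding of exactly the _8US encoding rewrites PPET8 instead; rows where the relevant threshold is already set are left untouched.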
+
static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, u8 sta_id)
{
@@ -1940,6 +1994,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_supported_band *sband;
void *cmd;
+ u8 nominal_padding;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
ver = 1;
@@ -2029,22 +2084,96 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
sizeof(sta_ctxt_cmd.pkt_ext));
- /* If PPE Thresholds exist, parse them into a FW-familiar format. */
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
- &sta_ctxt_cmd.pkt_ext);
- flags |= STA_CTXT_HE_PACKET_EXT;
- /* PPE Thresholds doesn't exist - set the API PPE values
- * according to Common Nominal Packet Padding fiels. */
- } else {
- u8 nominal_padding =
- u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9],
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
- if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ if (sta->deflink.eht_cap.has_eht) {
+ nominal_padding =
+ u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+		/* If PPE Thresholds exist, parse them into a FW-familiar format. */
+ if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (sta->deflink.eht_cap.eht_ppe_thres[0] &
+ IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &sta->deflink.eht_cap.eht_ppe_thres[0];
+ u8 ru_index_bitmap =
+ u16_get_bits(*ppe,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mvm_parse_ppe(mvm,
+ &sta_ctxt_cmd.pkt_ext,
+ nss, ru_index_bitmap, ppe,
+ ppe_pos_bit, true);
+ flags |= STA_CTXT_HE_PACKET_EXT;
+		/* EHT PPE Thresholds don't exist - set the API according to HE PPE Thresholds */
+ } else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ struct iwl_he_pkt_ext_v2 *pkt_ext =
+ &sta_ctxt_cmd.pkt_ext;
+
+ /*
+ * Even though HE Capabilities IE doesn't contain PPE
+ * Thresholds for BW 320Mhz, thresholds for this BW will
+ * be filled in with the same values as 160Mhz, due to
+ * the inheritance, as required.
+ */
+ iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, pkt_ext,
+ true);
+
+ /*
+ * According to the requirements, for MCSs 12-13 the maximum value between
+ * HE PPE Threshold and Common Nominal Packet Padding needs to be taken
+ */
+ iwl_mvm_get_optimal_ppe_info(pkt_ext, nominal_padding);
+
+ flags |= STA_CTXT_HE_PACKET_EXT;
+
+ /*
+		 * If PPE Thresholds are not present in either the EHT IE or the
+		 * HE IE - take the thresholds from the Common Nominal Packet
+		 * Padding field
+ */
+ } else {
iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
nominal_padding,
&flags);
+ }
+ } else if (sta->deflink.he_cap.has_he) {
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
+ &sta_ctxt_cmd.pkt_ext,
+ false);
+ flags |= STA_CTXT_HE_PACKET_EXT;
+ /*
+			 * PPE Thresholds don't exist - set the API PPE values
+			 * according to the Common Nominal Packet Padding field.
+ */
+ } else {
+ nominal_padding =
+ u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
+ nominal_padding,
+ &flags);
+ }
+ }
+
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ int bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th =
+ &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0];
+
+ IWL_DEBUG_HT(mvm,
+ "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
+ i, bw, qam_th[0], qam_th[1]);
+ }
}
if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
@@ -2192,8 +2321,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* interface was added.
*/
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
- if (vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax)
+ if ((vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax) ||
+ (vif->bss_conf.eht_support &&
+ !iwlwifi_mod_params.disable_11be))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
@@ -2201,8 +2332,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
- vif->cfg.assoc && vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax)
+ vif->cfg.assoc &&
+ ((vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax) ||
+ (vif->bss_conf.eht_support &&
+ !iwlwifi_mod_params.disable_11be)))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
/*
@@ -2306,6 +2440,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
+ /* first remove remaining keys */
+ iwl_mvm_sec_key_remove_ap(mvm, vif);
+
/*
* Remove AP station now that
* the MAC is unassoc
@@ -3059,6 +3196,9 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
switch (mvm_sta->pairwise_cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_TKIP;
+ break;
case WLAN_CIPHER_SUITE_CCMP:
conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP;
break;
@@ -3209,8 +3349,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- if (vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax)
+ if ((vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax) ||
+ (vif->bss_conf.eht_support &&
+ !iwlwifi_mod_params.disable_11be))
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
@@ -3461,6 +3603,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
struct iwl_mvm_sta *mvmsta = NULL;
struct iwl_mvm_key_pn *ptk_pn;
int keyidx = key->keyidx;
+ u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0);
int ret, i;
u8 key_offset;
@@ -3600,7 +3744,12 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
mvmsta->pairwise_cipher = key->cipher;
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
- ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
+
+ if (sec_key_ver)
+ ret = iwl_mvm_sec_key_add(mvm, vif, sta, key);
+ else
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
+
if (ret) {
IWL_WARN(mvm, "set key failed\n");
key->hw_key_idx = STA_KEY_IDX_INVALID;
@@ -3653,7 +3802,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
}
IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
- ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
+ if (sec_key_ver)
+ ret = iwl_mvm_sec_key_del(mvm, vif, sta, key);
+ else
+ ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
break;
default:
ret = -EINVAL;
@@ -3758,7 +3910,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
iwl_mvm_phy_band_from_nl80211(channel->band),
- PHY_VHT_CHANNEL_MODE20,
+ IWL_PHY_CHANNEL_MODE20,
0);
/* Set the time and duration */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
new file mode 100644
index 000000000000..e27c893502f7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2022 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <net/mac80211.h>
+#include "mvm.h"
+#include "fw/api/context.h"
+#include "fw/api/datapath.h"
+
+static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return BIT(mvmvif->mcast_sta.sta_id);
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ return BIT(mvmsta->sta_id);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA)
+ return BIT(mvmvif->ap_sta_id);
+
+ /* invalid */
+ return 0;
+}
+
+static u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 flags = 0;
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP104:
+ flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_WEP40:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_WEP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_CCMP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP;
+ break;
+ }
+
+ rcu_read_lock();
+ if (!sta && vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+ u8 sta_id = mvmvif->ap_sta_id;
+
+ sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ }
+
+ if (!IS_ERR_OR_NULL(sta) && sta->mfp)
+ flags |= IWL_SEC_KEY_FLAG_MFP;
+ rcu_read_unlock();
+
+ return flags;
+}
+
+static int __iwl_mvm_sec_key_del(struct iwl_mvm *mvm, u32 sta_mask,
+ u32 key_flags, u32 keyidx, u32 flags)
+{
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .u.remove.sta_mask = cpu_to_le32(sta_mask),
+ .u.remove.key_id = cpu_to_le32(keyidx),
+ .u.remove.key_flags = cpu_to_le32(key_flags),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, flags, sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_sec_key_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf);
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .u.add.sta_mask = cpu_to_le32(sta_mask),
+ .u.add.key_id = cpu_to_le32(keyconf->keyidx),
+ .u.add.key_flags = cpu_to_le32(key_flags),
+ .u.add.tx_seq = cpu_to_le64(atomic64_read(&keyconf->tx_pn)),
+ };
+ int ret;
+
+ if (WARN_ON(keyconf->keylen > sizeof(cmd.u.add.key)))
+ return -EINVAL;
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+ memcpy(cmd.u.add.key + IWL_SEC_WEP_KEY_OFFSET, keyconf->key,
+ keyconf->keylen);
+ else
+ memcpy(cmd.u.add.key, keyconf->key, keyconf->keylen);
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ memcpy(cmd.u.add.tkip_mic_rx_key,
+ keyconf->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ 8);
+ memcpy(cmd.u.add.tkip_mic_tx_key,
+ keyconf->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
+ 8);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /*
+ * For WEP, the same key is used for multicast and unicast so need to
+ * upload it again. If this fails, remove the original as well.
+ */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ cmd.u.add.key_flags ^= cpu_to_le32(IWL_SEC_KEY_FLAG_MCAST_KEY);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ if (ret)
+ __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags,
+ keyconf->keyidx, 0);
+ }
+
+ return ret;
+}
+
+static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u32 flags)
+{
+ u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf);
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf);
+ int ret;
+
+ ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ flags);
+ if (ret)
+ return ret;
+
+ /* For WEP, delete the key again as unicast */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ key_flags ^= IWL_SEC_KEY_FLAG_MCAST_KEY;
+ ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags,
+ keyconf->keyidx, flags);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ return _iwl_mvm_sec_key_del(mvm, vif, sta, keyconf, 0);
+}
+
+static void iwl_mvm_sec_key_remove_ap_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ if (sta)
+ return;
+
+ _iwl_mvm_sec_key_del(mvm, vif, NULL, key, CMD_ASYNC);
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
+ mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
+ return;
+
+ if (!sec_key_ver)
+ return;
+
+ ieee80211_iter_keys_rcu(mvm->hw, vif,
+ iwl_mvm_sec_key_remove_ap_iter,
+ NULL);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 97cba526e465..ce6b701f3f4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1105,6 +1105,8 @@ struct iwl_mvm {
unsigned long last_reset_or_resume_time_jiffies;
bool sta_remove_requires_queue_remove;
+
+ bool pldr_sync;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1644,7 +1646,8 @@ int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
struct sk_buff *beacon,
void *data, int len);
-u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
+u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
struct ieee80211_vif *vif);
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw,
u8 rate_idx);
@@ -2079,6 +2082,18 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
+/* new MLD related APIs */
+int iwl_mvm_sec_key_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
@@ -2201,10 +2216,10 @@ static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm)
iwl_mei_host_disassociated();
}
-static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
+static inline void iwl_mvm_mei_device_state(struct iwl_mvm *mvm, bool up)
{
if (mvm->mei_registered)
- iwl_mei_device_down();
+ iwl_mei_device_state(up);
}
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d2d42cd48af2..e78f5beaa2d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -547,6 +547,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+ HCMD_NAME(SEC_KEY_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@@ -1076,6 +1077,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
+ u32 max_agg;
size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
@@ -1097,12 +1099,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ max_agg = IEEE80211_MAX_AMPDU_BUF_EHT;
+ else
+ max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
+
+ hw->max_rx_aggregation_subframes = max_agg;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = max_agg;
op_mode = hw->priv;
@@ -1375,7 +1382,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
iwl_free_fw_paging(&mvm->fwrt);
iwl_fw_dump_conf_clear(&mvm->fwrt);
- iwl_mvm_mei_device_down(mvm);
+ iwl_mvm_mei_device_state(mvm, false);
}
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
@@ -1881,6 +1888,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ if (mvm->pldr_sync)
+ return;
+
if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
!test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
&mvm->status))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index a3cefbc43e80..06f4203fb989 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -14,16 +14,18 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
- return PHY_VHT_CHANNEL_MODE20;
+ return IWL_PHY_CHANNEL_MODE20;
case NL80211_CHAN_WIDTH_40:
- return PHY_VHT_CHANNEL_MODE40;
+ return IWL_PHY_CHANNEL_MODE40;
case NL80211_CHAN_WIDTH_80:
- return PHY_VHT_CHANNEL_MODE80;
+ return IWL_PHY_CHANNEL_MODE80;
case NL80211_CHAN_WIDTH_160:
- return PHY_VHT_CHANNEL_MODE160;
+ return IWL_PHY_CHANNEL_MODE160;
+ case NL80211_CHAN_WIDTH_320:
+ return IWL_PHY_CHANNEL_MODE320;
default:
WARN(1, "Invalid channel width=%u", chandef->width);
- return PHY_VHT_CHANNEL_MODE20;
+ return IWL_PHY_CHANNEL_MODE20;
}
}
@@ -33,34 +35,32 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
*/
u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{
- switch (chandef->chan->center_freq - chandef->center_freq1) {
- case -70:
- return PHY_VHT_CTRL_POS_4_BELOW;
- case -50:
- return PHY_VHT_CTRL_POS_3_BELOW;
- case -30:
- return PHY_VHT_CTRL_POS_2_BELOW;
- case -10:
- return PHY_VHT_CTRL_POS_1_BELOW;
- case 10:
- return PHY_VHT_CTRL_POS_1_ABOVE;
- case 30:
- return PHY_VHT_CTRL_POS_2_ABOVE;
- case 50:
- return PHY_VHT_CTRL_POS_3_ABOVE;
- case 70:
- return PHY_VHT_CTRL_POS_4_ABOVE;
- default:
- WARN(1, "Invalid channel definition");
- fallthrough;
- case 0:
+ int offs = chandef->chan->center_freq - chandef->center_freq1;
+ int abs_offs = abs(offs);
+ u8 ret;
+
+ if (offs == 0) {
/*
* The FW is expected to check the control channel position only
* when in HT/VHT and the channel width is not 20MHz. Return
* this value as the default one.
*/
- return PHY_VHT_CTRL_POS_1_BELOW;
+ return 0;
}
+
+ /* this results in a value 0-7, i.e. fitting into 0b0111 */
+ ret = (abs_offs - 10) / 20;
+ /*
+ * But we need the value to be in 0b1011 because 0b0100 is
+ * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
+ * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
+ */
+ ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
+ ((ret & BIT(2)) << 1);
+ /* and add the above bit */
+ ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;
+
+ return ret;
}
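A few worked values (assuming IWL_PHY_CTRL_POS_OFFS_MSK covers the two low bits, as the in-code comment implies):

	offs (MHz)   abs_offs   (abs_offs - 10) / 20   encoded result
	+10          10         0                      0x4 (IWL_PHY_CTRL_POS_ABOVE)
	-70          70         3                      0x3
	+110         110        5 (0b101)              0x1 | 0x8 | 0x4 = 0xd

So the two low bits carry the offset step, bit 3 (IWL_PHY_CTRL_POS_OFFS_EXT) carries what used to be bit 2, and bit 2 (IWL_PHY_CTRL_POS_ABOVE) indicates that the control channel sits above the center frequency.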
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 2e9081cb6627..f30eeab5505b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -9,9 +9,11 @@
#include "iwl-op-mode.h"
#include "mvm.h"
-static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+static u8 rs_fw_bw_from_sta_bw(const struct ieee80211_sta *sta)
{
switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
+ return IWL_TLC_MNG_CH_WIDTH_320MHZ;
case IEEE80211_STA_RX_BW_160:
return IWL_TLC_MNG_CH_WIDTH_160MHZ;
case IEEE80211_STA_RX_BW_80:
@@ -238,6 +240,122 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
+static u8 rs_fw_eht_max_nss(u8 rx_nss, u8 tx_nss)
+{
+ u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
+	/* the max NSS that can be used is the min of our TX capa
+	 * and the peer's RX capa.
+	 */
+ return min(tx, rx);
+}
+
+#define MAX_NSS_MCS(mcs_num, rx, tx) \
+ rs_fw_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \
+ (tx)->rx_tx_mcs ##mcs_num## _max_nss)
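The ## token pasting only selects the per-MCS struct member; for instance, MAX_NSS_MCS(11, mcs_rx, mcs_tx) expands to

	rs_fw_eht_max_nss((mcs_rx)->rx_tx_mcs11_max_nss, (mcs_tx)->rx_tx_mcs11_max_nss)

i.e. the minimum of the peer's RX NSS and our TX NSS for the MCS 10-11 bucket.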
+
+static void rs_fw_set_eht_mcs_nss(__le16 ht_rates[][3],
+ enum IWL_TLC_MCS_PER_BW bw,
+ u8 max_nss, u16 mcs_msk)
+{
+ if (max_nss >= 2)
+ ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+
+ if (max_nss >= 1)
+ ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+}
+
+static const
+struct ieee80211_eht_mcs_nss_supp_bw *
+rs_fw_rs_mcs2eht_mcs(enum IWL_TLC_MCS_PER_BW bw,
+ const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
+{
+ switch (bw) {
+ case IWL_TLC_MCS_PER_BW_80:
+ return &eht_mcs->bw._80;
+ case IWL_TLC_MCS_PER_BW_160:
+ return &eht_mcs->bw._160;
+ case IWL_TLC_MCS_PER_BW_320:
+ return &eht_mcs->bw._320;
+ default:
+ return NULL;
+ }
+}
+
+static void rs_fw_eht_set_enabled_rates(const struct ieee80211_sta *sta,
+ struct ieee80211_supported_band *sband,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ /* peer RX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
+ &sta->deflink.eht_cap.eht_mcs_nss_supp;
+ /* our TX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
+ &sband->iftype_data->eht_cap.eht_mcs_nss_supp;
+
+ enum IWL_TLC_MCS_PER_BW bw;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
+
+ /* peer is 20Mhz only */
+ if (!(sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_rx_20 = eht_rx_mcs->only_20mhz;
+ } else {
+ mcs_rx_20.rx_tx_mcs7_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs9_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs11_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_rx_20.rx_tx_mcs13_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* nic is 20Mhz only */
+ if (!(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_tx_20 = eht_tx_mcs->only_20mhz;
+ } else {
+ mcs_tx_20.rx_tx_mcs7_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs9_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs11_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_tx_20.rx_tx_mcs13_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* rates for 20/40/80 bw */
+ bw = IWL_TLC_MCS_PER_BW_80;
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20), GENMASK(7, 0));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20), GENMASK(9, 8));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20), GENMASK(11, 10));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20), GENMASK(13, 12));
+
+ /* rate for 160/320 bw */
+ for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
+ rs_fw_rs_mcs2eht_mcs(bw, eht_rx_mcs);
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
+ rs_fw_rs_mcs2eht_mcs(bw, eht_tx_mcs);
+
+		/* got unsupported index for bw */
+ if (!mcs_rx || !mcs_tx)
+ continue;
+
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, mcs_rx, mcs_tx), GENMASK(9, 0));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, mcs_rx, mcs_tx), GENMASK(11, 10));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, mcs_rx, mcs_tx), GENMASK(13, 12));
+ }
+
+	/* the station supports only a single receive chain */
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC ||
+ sta->deflink.rx_nss < 2)
+ memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
+ sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
+}
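One subtlety in the non-20MHz-only branches above: struct ieee80211_eht_mcs_nss_supp_bw (the layout used for 80/160/320 MHz in ieee80211.h) has no separate MCS 0-7 member - MCS 0-9 share rx_tx_mcs9_max_nss there - so the synthesized 20 MHz view copies that value into its rx_tx_mcs7_max_nss slot; filling mcs7 from the _80 struct's mcs9 field is deliberate, not a typo.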
+
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd_v4 *cmd)
@@ -258,7 +376,10 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
/* HT/VHT rates */
- if (he_cap->has_he) {
+ if (sta->deflink.eht_cap.has_eht) {
+ cmd->mode = IWL_TLC_MNG_MODE_EHT;
+ rs_fw_eht_set_enabled_rates(sta, sband, cmd);
+ } else if (he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
rs_fw_he_set_enabled_rates(sta, sband, cmd);
} else if (vht_cap->vht_supported) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1aadccd8841f..5f782ca1e254 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1747,10 +1747,12 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
rx_status->rate_idx = rate;
- if (WARN_ONCE(rate < 0 || rate > 0xFF,
- "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band))
+ if ((rate < 0 || rate > 0xFF) && net_ratelimit()) {
+ IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band);
rx_status->rate_idx = 0;
+ }
+
break;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index cbd8053a9e35..515dd3e0730d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1954,6 +1954,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (vif->cfg.assoc)
return ret;
+ /* first remove remaining keys */
+ iwl_mvm_sec_key_remove_ap(mvm, vif);
+
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 86d20e13bf47..fadaa683a416 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -183,7 +183,10 @@ static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
bool amsdu)
{
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ ||
+ (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ CSR_HW_REV_TYPE(mvm->trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ mvm->trans->hw_rev_step == SILICON_A_STEP))
return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu);
return iwl_mvm_tx_csum_bz(mvm, skb, amsdu);
}
@@ -1171,9 +1174,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
+ /*
+	 * With the new TX API the IV is added by the HW and is not present
+	 * in the skb, so don't tell iwl_mvm_mei_tx_copy_to_csme about the
+	 * IV for those devices.
+ */
if (ieee80211_is_data(fc))
iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
- info->control.hw_key ?
+ info->control.hw_key &&
+ !iwl_mvm_has_new_tx_api(mvm) ?
info->control.hw_key->iv_len : 0);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
@@ -1206,6 +1215,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct sk_buff_head mpdus_skbs;
unsigned int payload_len;
int ret;
+ struct sk_buff *orig_skb = skb;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1238,8 +1248,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
if (ret) {
+ /* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
- return ret;
+			/* the skb here is not necessarily the same one that entered
+			 * this function, so free it explicitly.
+ */
+ if (skb == orig_skb)
+ ieee80211_free_txskb(mvm->hw, skb);
+ else
+ kfree_skb(skb);
+ /* there was an error, but we consumed the skb one way or another, so return 0 */
+ return 0;
}
}
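
The error path above hands ownership of the skb to the TX routine: partially built TSO segments are purged, the skb currently in flight is freed (via ieee80211_free_txskb() when it is still the caller's original frame, kfree_skb() otherwise), and 0 is returned so the caller does not free it again. A minimal sketch of that consume-on-error contract, with hypothetical names:

    #include <linux/skbuff.h>
    #include <net/mac80211.h>

    /* On failure, free whichever skb we currently hold and report success,
     * because the caller must not touch the skb after handing it over.
     */
    static int tx_consume_on_error(struct ieee80211_hw *hw, struct sk_buff *skb,
    			       struct sk_buff *orig_skb, int err)
    {
    	if (!err)
    		return 0;

    	if (skb == orig_skb)
    		ieee80211_free_txskb(hw, skb);	/* original frame: report TX status */
    	else
    		kfree_skb(skb);			/* internally generated segment */

    	return 0;				/* consumed either way */
    }
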
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index bd50f52a1aad..0a9af1ad1f20 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2052,6 +2052,7 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
struct iwl_trans_pcie_removal {
struct pci_dev *pdev;
struct work_struct work;
+ bool rescan;
};
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
@@ -2060,18 +2061,61 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
+ struct pci_bus *bus = pdev->bus;
dev_err(&pdev->dev, "Device gone - attempting removal\n");
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
pci_lock_rescan_remove();
pci_dev_put(pdev);
pci_stop_and_remove_bus_device(pdev);
+ if (removal->rescan)
+ pci_rescan_bus(bus->parent);
pci_unlock_rescan_remove();
kfree(removal);
module_put(THIS_MODULE);
}
+void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
+{
+ struct iwl_trans_pcie_removal *removal;
+
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return;
+
+ IWL_ERR(trans, "Device gone - scheduling removal!\n");
+
+ /*
+ * get a module reference to avoid doing this
+ * while unloading anyway and to avoid
+ * scheduling a work with code that's being
+ * removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(trans,
+ "Module is being unloaded - abort\n");
+ return;
+ }
+
+ removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+ if (!removal) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ /*
+ * we don't need to clear this flag, because
+ * the trans will be freed and reallocated.
+ */
+ set_bit(STATUS_TRANS_DEAD, &trans->status);
+
+ removal->pdev = to_pci_dev(trans->dev);
+ removal->rescan = rescan;
+ INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
+ pci_dev_get(removal->pdev);
+ schedule_work(&removal->work);
+}
+EXPORT_SYMBOL(iwl_trans_pcie_remove);
+
/*
* This version doesn't disable BHs but rather assumes they're
* already disabled.
@@ -2131,47 +2175,12 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
iwl_trans_pcie_dump_regs(trans);
- if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
- struct iwl_trans_pcie_removal *removal;
-
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- goto err;
-
- IWL_ERR(trans, "Device gone - scheduling removal!\n");
-
- /*
- * get a module reference to avoid doing this
- * while unloading anyway and to avoid
- * scheduling a work with code that's being
- * removed.
- */
- if (!try_module_get(THIS_MODULE)) {
- IWL_ERR(trans,
- "Module is being unloaded - abort\n");
- goto err;
- }
-
- removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
- if (!removal) {
- module_put(THIS_MODULE);
- goto err;
- }
- /*
- * we don't need to clear this flag, because
- * the trans will be freed and reallocated.
- */
- set_bit(STATUS_TRANS_DEAD, &trans->status);
-
- removal->pdev = to_pci_dev(trans->dev);
- INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
- pci_dev_get(removal->pdev);
- schedule_work(&removal->work);
- } else {
+ if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
+ iwl_trans_pcie_remove(trans, false);
+ else
iwl_write32(trans, CSR_RESET,
CSR_RESET_REG_FLAG_FORCE_NMI);
- }
-err:
spin_unlock(&trans_pcie->reg_lock);
return false;
}
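
The refactor above moves the "device fell off the bus" handling into iwl_trans_pcie_remove() and adds an optional bus rescan. The general shape is a deferred-removal pattern: the detection site may run in atomic context, so it pins the module and the pci_dev, marks the transport dead, and schedules a work item that performs the removal (and optional parent-bus rescan) under the rescan/remove lock. A condensed sketch assuming only core PCI and workqueue APIs (all names illustrative):

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct dead_dev_work {
    	struct pci_dev *pdev;
    	struct work_struct work;
    	bool rescan;
    };

    static void dead_dev_wk(struct work_struct *wk)
    {
    	struct dead_dev_work *w = container_of(wk, struct dead_dev_work, work);
    	struct pci_bus *bus = w->pdev->bus;

    	pci_lock_rescan_remove();
    	pci_dev_put(w->pdev);
    	pci_stop_and_remove_bus_device(w->pdev);
    	if (w->rescan)
    		pci_rescan_bus(bus->parent);	/* rediscover the device after reset */
    	pci_unlock_rescan_remove();
    	kfree(w);
    	module_put(THIS_MODULE);		/* released only once the work has run */
    }

    static void schedule_dead_dev(struct pci_dev *pdev, bool rescan)
    {
    	struct dead_dev_work *w;

    	if (!try_module_get(THIS_MODULE))	/* don't schedule code being unloaded */
    		return;

    	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* caller may hold spinlocks */
    	if (!w) {
    		module_put(THIS_MODULE);
    		return;
    	}

    	w->pdev = pci_dev_get(pdev);		/* keep pdev alive until the work runs */
    	w->rescan = rescan;
    	INIT_WORK(&w->work, dead_dev_wk);
    	schedule_work(&w->work);
    }
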
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 4e0a0c881697..26287b129d18 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -91,7 +91,7 @@ static int prism2_get_datarates(struct net_device *dev, u8 *rates)
static int prism2_get_name(struct net_device *dev,
struct iw_request_info *info,
- char *name, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
u8 rates[10];
int len, i, over2 = 0;
@@ -105,7 +105,7 @@ static int prism2_get_name(struct net_device *dev,
}
}
- strcpy(name, over2 ? "IEEE 802.11b" : "IEEE 802.11-DS");
+ strcpy(wrqu->name, over2 ? "IEEE 802.11b" : "IEEE 802.11-DS");
return 0;
}
@@ -113,8 +113,9 @@ static int prism2_get_name(struct net_device *dev,
static int prism2_ioctl_siwencode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq, char *keybuf)
+ union iwreq_data *wrqu, char *keybuf)
{
+ struct iw_point *erq = &wrqu->encoding;
struct hostap_interface *iface;
local_info_t *local;
int i;
@@ -215,8 +216,9 @@ static int prism2_ioctl_siwencode(struct net_device *dev,
static int prism2_ioctl_giwencode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq, char *key)
+ union iwreq_data *wrqu, char *key)
{
+ struct iw_point *erq = &wrqu->encoding;
struct hostap_interface *iface;
local_info_t *local;
int i, len;
@@ -321,8 +323,9 @@ static int hostap_set_rate(struct net_device *dev)
static int prism2_ioctl_siwrate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->bitrate;
struct hostap_interface *iface;
local_info_t *local;
@@ -381,8 +384,9 @@ static int prism2_ioctl_siwrate(struct net_device *dev,
static int prism2_ioctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->bitrate;
u16 val;
struct hostap_interface *iface;
local_info_t *local;
@@ -440,8 +444,9 @@ static int prism2_ioctl_giwrate(struct net_device *dev,
static int prism2_ioctl_siwsens(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *sens, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *sens = &wrqu->sens;
struct hostap_interface *iface;
local_info_t *local;
@@ -461,8 +466,9 @@ static int prism2_ioctl_siwsens(struct net_device *dev,
static int prism2_ioctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *sens, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *sens = &wrqu->sens;
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
@@ -485,8 +491,9 @@ static int prism2_ioctl_giwsens(struct net_device *dev,
/* Deprecated in new wireless extension API */
static int prism2_ioctl_giwaplist(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
struct sockaddr *addr;
@@ -526,8 +533,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
static int prism2_ioctl_siwrts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rts, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rts = &wrqu->rts;
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
@@ -553,8 +561,9 @@ static int prism2_ioctl_siwrts(struct net_device *dev,
static int prism2_ioctl_giwrts(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rts, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rts = &wrqu->rts;
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
@@ -576,8 +585,9 @@ static int prism2_ioctl_giwrts(struct net_device *dev,
static int prism2_ioctl_siwfrag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rts, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rts = &wrqu->rts;
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
@@ -603,8 +613,9 @@ static int prism2_ioctl_siwfrag(struct net_device *dev,
static int prism2_ioctl_giwfrag(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rts, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rts = &wrqu->rts;
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
@@ -669,8 +680,9 @@ static int hostap_join_ap(struct net_device *dev)
static int prism2_ioctl_siwap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct sockaddr *ap_addr = &wrqu->ap_addr;
#ifdef PRISM2_NO_STATION_MODES
return -EOPNOTSUPP;
#else /* PRISM2_NO_STATION_MODES */
@@ -709,8 +721,9 @@ static int prism2_ioctl_siwap(struct net_device *dev,
static int prism2_ioctl_giwap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct sockaddr *ap_addr = &wrqu->ap_addr;
struct hostap_interface *iface;
local_info_t *local;
@@ -745,8 +758,9 @@ static int prism2_ioctl_giwap(struct net_device *dev,
static int prism2_ioctl_siwnickn(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *nickname)
+ union iwreq_data *wrqu, char *nickname)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
@@ -766,8 +780,9 @@ static int prism2_ioctl_siwnickn(struct net_device *dev,
static int prism2_ioctl_giwnickn(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *nickname)
+ union iwreq_data *wrqu, char *nickname)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
int len;
@@ -793,8 +808,9 @@ static int prism2_ioctl_giwnickn(struct net_device *dev,
static int prism2_ioctl_siwfreq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_freq *freq = &wrqu->freq;
struct hostap_interface *iface;
local_info_t *local;
@@ -830,8 +846,9 @@ static int prism2_ioctl_siwfreq(struct net_device *dev,
static int prism2_ioctl_giwfreq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_freq *freq = &wrqu->freq;
struct hostap_interface *iface;
local_info_t *local;
u16 val;
@@ -874,8 +891,9 @@ static void hostap_monitor_set_type(local_info_t *local)
static int prism2_ioctl_siwessid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *ssid)
+ union iwreq_data *wrqu, char *ssid)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
@@ -910,8 +928,9 @@ static int prism2_ioctl_siwessid(struct net_device *dev,
static int prism2_ioctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *essid)
+ union iwreq_data *wrqu, char *essid)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
u16 val;
@@ -946,8 +965,9 @@ static int prism2_ioctl_giwessid(struct net_device *dev,
static int prism2_ioctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
struct iw_range *range = (struct iw_range *) extra;
@@ -1121,8 +1141,9 @@ static int hostap_monitor_mode_disable(local_info_t *local)
static int prism2_ioctl_siwmode(struct net_device *dev,
struct iw_request_info *info,
- __u32 *mode, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ __u32 *mode = &wrqu->mode;
struct hostap_interface *iface;
local_info_t *local;
int double_reset = 0;
@@ -1197,8 +1218,9 @@ static int prism2_ioctl_siwmode(struct net_device *dev,
static int prism2_ioctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
- __u32 *mode, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ __u32 *mode = &wrqu->mode;
struct hostap_interface *iface;
local_info_t *local;
@@ -1222,8 +1244,9 @@ static int prism2_ioctl_giwmode(struct net_device *dev,
static int prism2_ioctl_siwpower(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *wrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *wrq = &wrqu->power;
#ifdef PRISM2_NO_STATION_MODES
return -EOPNOTSUPP;
#else /* PRISM2_NO_STATION_MODES */
@@ -1281,8 +1304,9 @@ static int prism2_ioctl_siwpower(struct net_device *dev,
static int prism2_ioctl_giwpower(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->power;
#ifdef PRISM2_NO_STATION_MODES
return -EOPNOTSUPP;
#else /* PRISM2_NO_STATION_MODES */
@@ -1339,8 +1363,9 @@ static int prism2_ioctl_giwpower(struct net_device *dev,
static int prism2_ioctl_siwretry(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->retry;
struct hostap_interface *iface;
local_info_t *local;
@@ -1400,8 +1425,9 @@ static int prism2_ioctl_siwretry(struct net_device *dev,
static int prism2_ioctl_giwretry(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->retry;
struct hostap_interface *iface;
local_info_t *local;
__le16 shortretry, longretry, lifetime, altretry;
@@ -1494,8 +1520,9 @@ static u16 prism2_txpower_dBm_to_hfa386x(int val)
static int prism2_ioctl_siwtxpow(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->txpower;
struct hostap_interface *iface;
local_info_t *local;
#ifdef RAW_TXPOWER_SETTING
@@ -1575,9 +1602,10 @@ static int prism2_ioctl_siwtxpow(struct net_device *dev,
static int prism2_ioctl_giwtxpow(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
#ifdef RAW_TXPOWER_SETTING
+ struct iw_param *rrq = &wrqu->txpower;
struct hostap_interface *iface;
local_info_t *local;
u16 resp0;
@@ -1710,8 +1738,9 @@ static inline int prism2_request_scan(struct net_device *dev)
static int prism2_ioctl_siwscan(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
int ret;
@@ -2057,8 +2086,9 @@ static inline int prism2_ioctl_giwscan_sta(struct net_device *dev,
static int prism2_ioctl_giwscan(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface;
local_info_t *local;
int res;
@@ -2303,7 +2333,7 @@ static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i)
static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu, char *extra)
+ union iwreq_data *uwrq, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
@@ -2654,7 +2684,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
@@ -2841,7 +2871,7 @@ static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
static int prism2_ioctl_priv_readmif(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
@@ -2862,7 +2892,7 @@ static int prism2_ioctl_priv_readmif(struct net_device *dev,
static int prism2_ioctl_priv_writemif(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
@@ -2885,7 +2915,7 @@ static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
struct hostap_interface *iface;
local_info_t *local;
int ret = 0;
- u32 mode;
+ union iwreq_data wrqu;
iface = netdev_priv(dev);
local = iface->local;
@@ -2899,8 +2929,8 @@ static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
if (*i == 0) {
/* Disable monitor mode - old mode was not saved, so go to
* Master mode */
- mode = IW_MODE_MASTER;
- ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
+ wrqu.mode = IW_MODE_MASTER;
+ ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL);
} else if (*i == 1) {
/* netlink socket mode is not supported anymore since it did
* not separate different devices from each other and was not
@@ -2916,8 +2946,8 @@ static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
local->monitor_type = PRISM2_MONITOR_PRISM;
break;
}
- mode = IW_MODE_MONITOR;
- ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
+ wrqu.mode = IW_MODE_MONITOR;
+ ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL);
hostap_monitor_mode_enable(local);
} else
ret = -EINVAL;
@@ -3079,8 +3109,9 @@ static int prism2_set_genericelement(struct net_device *dev, u8 *elem,
static int prism2_ioctl_siwauth(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *data = &wrqu->param;
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
@@ -3145,8 +3176,9 @@ static int prism2_ioctl_siwauth(struct net_device *dev,
static int prism2_ioctl_giwauth(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *data = &wrqu->param;
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
@@ -3184,8 +3216,9 @@ static int prism2_ioctl_giwauth(struct net_device *dev,
static int prism2_ioctl_siwencodeext(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *erq = &wrqu->encoding;
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
@@ -3358,8 +3391,9 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
static int prism2_ioctl_giwencodeext(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *erq = &wrqu->encoding;
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
struct lib80211_crypt_data **crypt;
@@ -3666,16 +3700,18 @@ static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local,
static int prism2_ioctl_siwgenie(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *data = &wrqu->data;
return prism2_set_genericelement(dev, extra, data->length);
}
static int prism2_ioctl_giwgenie(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *data = &wrqu->data;
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
int len = local->generic_elem_len - 2;
@@ -3713,7 +3749,7 @@ static int prism2_ioctl_set_generic_element(local_info_t *local,
static int prism2_ioctl_siwmlme(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
@@ -3864,70 +3900,56 @@ const struct ethtool_ops prism2_ethtool_ops = {
static const iw_handler prism2_handler[] =
{
- (iw_handler) NULL, /* SIOCSIWCOMMIT */
- (iw_handler) prism2_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) prism2_ioctl_siwfreq, /* SIOCSIWFREQ */
- (iw_handler) prism2_ioctl_giwfreq, /* SIOCGIWFREQ */
- (iw_handler) prism2_ioctl_siwmode, /* SIOCSIWMODE */
- (iw_handler) prism2_ioctl_giwmode, /* SIOCGIWMODE */
- (iw_handler) prism2_ioctl_siwsens, /* SIOCSIWSENS */
- (iw_handler) prism2_ioctl_giwsens, /* SIOCGIWSENS */
- (iw_handler) NULL /* not used */, /* SIOCSIWRANGE */
- (iw_handler) prism2_ioctl_giwrange, /* SIOCGIWRANGE */
- (iw_handler) NULL /* not used */, /* SIOCSIWPRIV */
- (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */
- (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */
- (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
- iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
- (iw_handler) prism2_ioctl_siwap, /* SIOCSIWAP */
- (iw_handler) prism2_ioctl_giwap, /* SIOCGIWAP */
- (iw_handler) prism2_ioctl_siwmlme, /* SIOCSIWMLME */
- (iw_handler) prism2_ioctl_giwaplist, /* SIOCGIWAPLIST */
- (iw_handler) prism2_ioctl_siwscan, /* SIOCSIWSCAN */
- (iw_handler) prism2_ioctl_giwscan, /* SIOCGIWSCAN */
- (iw_handler) prism2_ioctl_siwessid, /* SIOCSIWESSID */
- (iw_handler) prism2_ioctl_giwessid, /* SIOCGIWESSID */
- (iw_handler) prism2_ioctl_siwnickn, /* SIOCSIWNICKN */
- (iw_handler) prism2_ioctl_giwnickn, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) prism2_ioctl_siwrate, /* SIOCSIWRATE */
- (iw_handler) prism2_ioctl_giwrate, /* SIOCGIWRATE */
- (iw_handler) prism2_ioctl_siwrts, /* SIOCSIWRTS */
- (iw_handler) prism2_ioctl_giwrts, /* SIOCGIWRTS */
- (iw_handler) prism2_ioctl_siwfrag, /* SIOCSIWFRAG */
- (iw_handler) prism2_ioctl_giwfrag, /* SIOCGIWFRAG */
- (iw_handler) prism2_ioctl_siwtxpow, /* SIOCSIWTXPOW */
- (iw_handler) prism2_ioctl_giwtxpow, /* SIOCGIWTXPOW */
- (iw_handler) prism2_ioctl_siwretry, /* SIOCSIWRETRY */
- (iw_handler) prism2_ioctl_giwretry, /* SIOCGIWRETRY */
- (iw_handler) prism2_ioctl_siwencode, /* SIOCSIWENCODE */
- (iw_handler) prism2_ioctl_giwencode, /* SIOCGIWENCODE */
- (iw_handler) prism2_ioctl_siwpower, /* SIOCSIWPOWER */
- (iw_handler) prism2_ioctl_giwpower, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) prism2_ioctl_siwgenie, /* SIOCSIWGENIE */
- (iw_handler) prism2_ioctl_giwgenie, /* SIOCGIWGENIE */
- (iw_handler) prism2_ioctl_siwauth, /* SIOCSIWAUTH */
- (iw_handler) prism2_ioctl_giwauth, /* SIOCGIWAUTH */
- (iw_handler) prism2_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */
- (iw_handler) prism2_ioctl_giwencodeext, /* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
- (iw_handler) NULL, /* -- hole -- */
+ IW_HANDLER(SIOCGIWNAME, prism2_get_name),
+ IW_HANDLER(SIOCSIWFREQ, prism2_ioctl_siwfreq),
+ IW_HANDLER(SIOCGIWFREQ, prism2_ioctl_giwfreq),
+ IW_HANDLER(SIOCSIWMODE, prism2_ioctl_siwmode),
+ IW_HANDLER(SIOCGIWMODE, prism2_ioctl_giwmode),
+ IW_HANDLER(SIOCSIWSENS, prism2_ioctl_siwsens),
+ IW_HANDLER(SIOCGIWSENS, prism2_ioctl_giwsens),
+ IW_HANDLER(SIOCGIWRANGE, prism2_ioctl_giwrange),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, prism2_ioctl_siwap),
+ IW_HANDLER(SIOCGIWAP, prism2_ioctl_giwap),
+ IW_HANDLER(SIOCSIWMLME, prism2_ioctl_siwmlme),
+ IW_HANDLER(SIOCGIWAPLIST, prism2_ioctl_giwaplist),
+ IW_HANDLER(SIOCSIWSCAN, prism2_ioctl_siwscan),
+ IW_HANDLER(SIOCGIWSCAN, prism2_ioctl_giwscan),
+ IW_HANDLER(SIOCSIWESSID, prism2_ioctl_siwessid),
+ IW_HANDLER(SIOCGIWESSID, prism2_ioctl_giwessid),
+ IW_HANDLER(SIOCSIWNICKN, prism2_ioctl_siwnickn),
+ IW_HANDLER(SIOCGIWNICKN, prism2_ioctl_giwnickn),
+ IW_HANDLER(SIOCSIWRATE, prism2_ioctl_siwrate),
+ IW_HANDLER(SIOCGIWRATE, prism2_ioctl_giwrate),
+ IW_HANDLER(SIOCSIWRTS, prism2_ioctl_siwrts),
+ IW_HANDLER(SIOCGIWRTS, prism2_ioctl_giwrts),
+ IW_HANDLER(SIOCSIWFRAG, prism2_ioctl_siwfrag),
+ IW_HANDLER(SIOCGIWFRAG, prism2_ioctl_giwfrag),
+ IW_HANDLER(SIOCSIWTXPOW, prism2_ioctl_siwtxpow),
+ IW_HANDLER(SIOCGIWTXPOW, prism2_ioctl_giwtxpow),
+ IW_HANDLER(SIOCSIWRETRY, prism2_ioctl_siwretry),
+ IW_HANDLER(SIOCGIWRETRY, prism2_ioctl_giwretry),
+ IW_HANDLER(SIOCSIWENCODE, prism2_ioctl_siwencode),
+ IW_HANDLER(SIOCGIWENCODE, prism2_ioctl_giwencode),
+ IW_HANDLER(SIOCSIWPOWER, prism2_ioctl_siwpower),
+ IW_HANDLER(SIOCGIWPOWER, prism2_ioctl_giwpower),
+ IW_HANDLER(SIOCSIWGENIE, prism2_ioctl_siwgenie),
+ IW_HANDLER(SIOCGIWGENIE, prism2_ioctl_giwgenie),
+ IW_HANDLER(SIOCSIWAUTH, prism2_ioctl_siwauth),
+ IW_HANDLER(SIOCGIWAUTH, prism2_ioctl_giwauth),
+ IW_HANDLER(SIOCSIWENCODEEXT, prism2_ioctl_siwencodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, prism2_ioctl_giwencodeext),
};
static const iw_handler prism2_private_handler[] =
-{ /* SIOCIWFIRSTPRIV + */
- (iw_handler) prism2_ioctl_priv_prism2_param, /* 0 */
- (iw_handler) prism2_ioctl_priv_get_prism2_param, /* 1 */
- (iw_handler) prism2_ioctl_priv_writemif, /* 2 */
- (iw_handler) prism2_ioctl_priv_readmif, /* 3 */
+{ /* SIOCIWFIRSTPRIV + */
+ prism2_ioctl_priv_prism2_param, /* 0 */
+ prism2_ioctl_priv_get_prism2_param, /* 1 */
+ prism2_ioctl_priv_writemif, /* 2 */
+ prism2_ioctl_priv_readmif, /* 3 */
};
const struct iw_handler_def hostap_iw_handler_def =
@@ -3935,8 +3957,8 @@ const struct iw_handler_def hostap_iw_handler_def =
.num_standard = ARRAY_SIZE(prism2_handler),
.num_private = ARRAY_SIZE(prism2_private_handler),
.num_private_args = ARRAY_SIZE(prism2_priv),
- .standard = (iw_handler *) prism2_handler,
- .private = (iw_handler *) prism2_private_handler,
+ .standard = prism2_handler,
+ .private = prism2_private_handler,
.private_args = (struct iw_priv_args *) prism2_priv,
.get_wireless_stats = hostap_get_wireless_stats,
};
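
The hostap conversion above switches every handler to the canonical iw_handler prototype, so the standard and private tables no longer need (iw_handler) casts and can be populated with IW_HANDLER(). A minimal sketch of that prototype, assuming <net/iw_handler.h> (my_giwfreq and my_handlers are hypothetical):

    #include <linux/netdevice.h>
    #include <net/iw_handler.h>

    static int my_giwfreq(struct net_device *dev, struct iw_request_info *info,
    		      union iwreq_data *wrqu, char *extra)
    {
    	struct iw_freq *freq = &wrqu->freq;	/* pick the union member this ioctl uses */

    	freq->m = 2412;				/* channel 1 */
    	freq->e = 6;				/* value is m * 10^e Hz */
    	return 0;
    }

    static const iw_handler my_handlers[] = {
    	IW_HANDLER(SIOCGIWFREQ, my_giwfreq),	/* no function-pointer casts needed */
    };
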
diff --git a/drivers/net/wireless/intersil/orinoco/wext.c b/drivers/net/wireless/intersil/orinoco/wext.c
index 4a01260027bc..dea1ff044342 100644
--- a/drivers/net/wireless/intersil/orinoco/wext.c
+++ b/drivers/net/wireless/intersil/orinoco/wext.c
@@ -154,9 +154,10 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
static int orinoco_ioctl_setwap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *ap_addr,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct sockaddr *ap_addr = &wrqu->ap_addr;
struct orinoco_private *priv = ndev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
unsigned long flags;
@@ -213,9 +214,10 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
static int orinoco_ioctl_getwap(struct net_device *dev,
struct iw_request_info *info,
- struct sockaddr *ap_addr,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct sockaddr *ap_addr = &wrqu->ap_addr;
struct orinoco_private *priv = ndev_priv(dev);
int err = 0;
@@ -234,9 +236,10 @@ static int orinoco_ioctl_getwap(struct net_device *dev,
static int orinoco_ioctl_setiwencode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq,
+ union iwreq_data *wrqu,
char *keybuf)
{
+ struct iw_point *erq = &wrqu->encoding;
struct orinoco_private *priv = ndev_priv(dev);
int index = (erq->flags & IW_ENCODE_INDEX) - 1;
int setindex = priv->tx_key;
@@ -325,9 +328,10 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
static int orinoco_ioctl_getiwencode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq,
+ union iwreq_data *wrqu,
char *keybuf)
{
+ struct iw_point *erq = &wrqu->encoding;
struct orinoco_private *priv = ndev_priv(dev);
int index = (erq->flags & IW_ENCODE_INDEX) - 1;
unsigned long flags;
@@ -361,9 +365,10 @@ static int orinoco_ioctl_getiwencode(struct net_device *dev,
static int orinoco_ioctl_setessid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq,
+ union iwreq_data *wrqu,
char *essidbuf)
{
+ struct iw_point *erq = &wrqu->essid;
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
@@ -392,9 +397,10 @@ static int orinoco_ioctl_setessid(struct net_device *dev,
static int orinoco_ioctl_getessid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *erq,
+ union iwreq_data *wrqu,
char *essidbuf)
{
+ struct iw_point *erq = &wrqu->essid;
struct orinoco_private *priv = ndev_priv(dev);
int active;
int err = 0;
@@ -420,9 +426,10 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
static int orinoco_ioctl_setfreq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *frq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_freq *frq = &wrqu->freq;
struct orinoco_private *priv = ndev_priv(dev);
int chan = -1;
unsigned long flags;
@@ -469,9 +476,10 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
static int orinoco_ioctl_getfreq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *frq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_freq *frq = &wrqu->freq;
struct orinoco_private *priv = ndev_priv(dev);
int tmp;
@@ -488,9 +496,10 @@ static int orinoco_ioctl_getfreq(struct net_device *dev,
static int orinoco_ioctl_getsens(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *srq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *srq = &wrqu->sens;
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
u16 val;
@@ -517,9 +526,10 @@ static int orinoco_ioctl_getsens(struct net_device *dev,
static int orinoco_ioctl_setsens(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *srq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *srq = &wrqu->sens;
struct orinoco_private *priv = ndev_priv(dev);
int val = srq->value;
unsigned long flags;
@@ -540,9 +550,10 @@ static int orinoco_ioctl_setsens(struct net_device *dev,
static int orinoco_ioctl_setrate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *rrq = &wrqu->bitrate;
struct orinoco_private *priv = ndev_priv(dev);
int ratemode;
int bitrate; /* 100s of kilobits */
@@ -574,9 +585,10 @@ static int orinoco_ioctl_setrate(struct net_device *dev,
static int orinoco_ioctl_getrate(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *rrq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *rrq = &wrqu->bitrate;
struct orinoco_private *priv = ndev_priv(dev);
int err = 0;
int bitrate, automatic;
@@ -610,9 +622,10 @@ static int orinoco_ioctl_getrate(struct net_device *dev,
static int orinoco_ioctl_setpower(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *prq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *prq = &wrqu->power;
struct orinoco_private *priv = ndev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
unsigned long flags;
@@ -664,9 +677,10 @@ static int orinoco_ioctl_setpower(struct net_device *dev,
static int orinoco_ioctl_getpower(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *prq,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_param *prq = &wrqu->power;
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
int err = 0;
@@ -1097,7 +1111,7 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev,
static int orinoco_ioctl_reset(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -1121,7 +1135,7 @@ static int orinoco_ioctl_reset(struct net_device *dev,
static int orinoco_ioctl_setibssport(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
@@ -1143,7 +1157,7 @@ static int orinoco_ioctl_setibssport(struct net_device *dev,
static int orinoco_ioctl_getibssport(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -1155,7 +1169,7 @@ static int orinoco_ioctl_getibssport(struct net_device *dev,
static int orinoco_ioctl_setport3(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -1201,7 +1215,7 @@ static int orinoco_ioctl_setport3(struct net_device *dev,
static int orinoco_ioctl_getport3(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -1213,7 +1227,7 @@ static int orinoco_ioctl_getport3(struct net_device *dev,
static int orinoco_ioctl_setpreamble(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -1245,7 +1259,7 @@ static int orinoco_ioctl_setpreamble(struct net_device *dev,
static int orinoco_ioctl_getpreamble(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -1265,9 +1279,10 @@ static int orinoco_ioctl_getpreamble(struct net_device *dev,
* For Wireless Tools 25 and 26 append "dummy" are the end. */
static int orinoco_ioctl_getrid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *data,
+ union iwreq_data *wrqu,
char *extra)
{
+ struct iw_point *data = &wrqu->data;
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
int rid = data->flags;
@@ -1303,7 +1318,7 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
/* Commit handler, called after set operations */
static int orinoco_ioctl_commit(struct net_device *dev,
struct iw_request_info *info,
- void *wrqu,
+ union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -1347,36 +1362,36 @@ static const struct iw_priv_args orinoco_privtab[] = {
*/
static const iw_handler orinoco_handler[] = {
- IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit),
- IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
- IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq),
- IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq),
- IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode),
- IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode),
- IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens),
- IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens),
- IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange),
+ IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
+ IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname),
+ IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
+ IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
+ IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode),
+ IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode),
+ IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
+ IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
+ IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange),
IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap),
- IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap),
- IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan),
- IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan),
- IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid),
- IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid),
- IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate),
- IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate),
- IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts),
- IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts),
- IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag),
- IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag),
- IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry),
- IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode),
- IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode),
- IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower),
- IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower),
+ IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
+ IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
+ IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan),
+ IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan),
+ IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
+ IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
+ IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
+ IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
+ IW_HANDLER(SIOCSIWRTS, cfg80211_wext_siwrts),
+ IW_HANDLER(SIOCGIWRTS, cfg80211_wext_giwrts),
+ IW_HANDLER(SIOCSIWFRAG, cfg80211_wext_siwfrag),
+ IW_HANDLER(SIOCGIWFRAG, cfg80211_wext_giwfrag),
+ IW_HANDLER(SIOCGIWRETRY, cfg80211_wext_giwretry),
+ IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
+ IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
+ IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
+ IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
@@ -1391,15 +1406,15 @@ static const iw_handler orinoco_handler[] = {
Added typecasting since we no longer use iwreq_data -- Moustafa
*/
static const iw_handler orinoco_private_handler[] = {
- [0] = (iw_handler)orinoco_ioctl_reset,
- [1] = (iw_handler)orinoco_ioctl_reset,
- [2] = (iw_handler)orinoco_ioctl_setport3,
- [3] = (iw_handler)orinoco_ioctl_getport3,
- [4] = (iw_handler)orinoco_ioctl_setpreamble,
- [5] = (iw_handler)orinoco_ioctl_getpreamble,
- [6] = (iw_handler)orinoco_ioctl_setibssport,
- [7] = (iw_handler)orinoco_ioctl_getibssport,
- [9] = (iw_handler)orinoco_ioctl_getrid,
+ [0] = orinoco_ioctl_reset,
+ [1] = orinoco_ioctl_reset,
+ [2] = orinoco_ioctl_setport3,
+ [3] = orinoco_ioctl_getport3,
+ [4] = orinoco_ioctl_setpreamble,
+ [5] = orinoco_ioctl_getpreamble,
+ [6] = orinoco_ioctl_setibssport,
+ [7] = orinoco_ioctl_getibssport,
+ [9] = orinoco_ioctl_getrid,
};
const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/net/wireless/intersil/p54/eeprom.h b/drivers/net/wireless/intersil/p54/eeprom.h
index 1d0aaf54389a..641c4e79879e 100644
--- a/drivers/net/wireless/intersil/p54/eeprom.h
+++ b/drivers/net/wireless/intersil/p54/eeprom.h
@@ -108,10 +108,10 @@ struct pda_country {
} __packed;
struct pda_antenna_gain {
- struct {
+ DECLARE_FLEX_ARRAY(struct {
u8 gain_5GHz; /* 0.25 dBi units */
u8 gain_2GHz; /* 0.25 dBi units */
- } __packed antenna[0];
+ } __packed, antenna);
} __packed;
struct pda_custom_wrapper {
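
The p54 hunk above wraps the antenna-gain array in DECLARE_FLEX_ARRAY(): a C99 flexible array member may not be the sole member of a struct, so the helper (from <linux/stddef.h>) keeps such "array-only" on-the-wire layouts valid without a zero-length array. A generic sketch with illustrative names:

    #include <linux/stddef.h>
    #include <linux/types.h>

    struct gain_entry {
    	u8 gain_5ghz;	/* 0.25 dBi units */
    	u8 gain_2ghz;	/* 0.25 dBi units */
    } __packed;

    struct gain_table {
    	/* a flexible array as the only member must be wrapped by the macro */
    	DECLARE_FLEX_ARRAY(struct gain_entry, antenna);
    } __packed;

    /* sizeof(struct gain_table) == 0; entries are read as table->antenna[i] */
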
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index e127453ab51a..c6084683aedd 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -705,6 +705,7 @@ static void p54_set_coverage_class(struct ieee80211_hw *dev,
static const struct ieee80211_ops p54_ops = {
.tx = p54_tx_80211,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = p54_start,
.stop = p54_stop,
.add_interface = p54_add_interface,
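
This is the first of several ops-table hunks in this section (p54, mac80211_hwsim, libertas_tf, mwl8k) that set .wake_tx_queue to mac80211's generic handler: drivers that still push frames through .tx can satisfy the wake_tx_queue callback by letting ieee80211_handle_wake_tx_queue() pull frames from the internal queues and hand them back to .tx. A minimal sketch with hypothetical names:

    #include <net/mac80211.h>

    static void example_tx(struct ieee80211_hw *hw,
    		       struct ieee80211_tx_control *control,
    		       struct sk_buff *skb)
    {
    	/* a real driver would hand the frame to the device here */
    	dev_kfree_skb(skb);
    }

    static const struct ieee80211_ops example_ops = {
    	.tx		= example_tx,
    	.wake_tx_queue	= ieee80211_handle_wake_tx_queue,	/* generic pull-to-push shim */
    };
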
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0d81098c7b45..c57c8903b7c0 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3109,6 +3109,7 @@ static int mac80211_hwsim_change_sta_links(struct ieee80211_hw *hw,
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue, \
.start = mac80211_hwsim_start, \
.stop = mac80211_hwsim_stop, \
.add_interface = mac80211_hwsim_add_interface, \
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 74c4942b9a5a..199d33ed3bb9 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -474,6 +474,7 @@ static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
static const struct ieee80211_ops lbtf_ops = {
.tx = lbtf_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = lbtf_op_start,
.stop = lbtf_op_stop,
.add_interface = lbtf_op_add_interface,
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index bda53cb91f37..52b18f4a774b 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -874,7 +874,7 @@ mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
return -EFAULT;
- if (strtobool(kbuf, &timeshare_coex))
+ if (kstrtobool(kbuf, &timeshare_coex))
return -EINVAL;
ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 63f861e6b28a..b95886e1413e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -10,6 +10,7 @@
#include <linux/completion.h>
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 4dc7e2e53b81..13bcb123d122 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5611,6 +5611,7 @@ static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw,
static const struct ieee80211_ops mwl8k_ops = {
.tx = mwl8k_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mwl8k_start,
.stop = mwl8k_stop,
.add_interface = mwl8k_add_interface,
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index 9ff43f1fc50d..d7f90a0eb21e 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -34,3 +34,4 @@ source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7921/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7996/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index c78ae4b89761..84c99b7e57f9 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615_COMMON) += mt7615/
obj-$(CONFIG_MT7915E) += mt7915/
obj-$(CONFIG_MT7921_COMMON) += mt7921/
+obj-$(CONFIG_MT7996E) += mt7996/
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index 47e9911ee9fe..11b0b3d62f29 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -100,23 +100,6 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
}
EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
-static int mt76_read_rate_txpower(struct seq_file *s, void *data)
-{
- struct mt76_dev *dev = dev_get_drvdata(s->private);
-
- mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
- ARRAY_SIZE(dev->rate_power.cck));
- mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
- ARRAY_SIZE(dev->rate_power.ofdm));
- mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
- ARRAY_SIZE(dev->rate_power.stbc));
- mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
- ARRAY_SIZE(dev->rate_power.ht));
- mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
- ARRAY_SIZE(dev->rate_power.vht));
- return 0;
-}
-
struct dentry *
mt76_register_debugfs_fops(struct mt76_phy *phy,
const struct file_operations *ops)
@@ -137,8 +120,6 @@ mt76_register_debugfs_fops(struct mt76_phy *phy,
debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
- debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
- mt76_read_rate_txpower);
debugfs_create_devm_seqfile(dev->dev, "rx-queues", dir,
mt76_rx_queues_read);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 7378c4d1e156..f795548562f5 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -60,6 +60,19 @@ mt76_alloc_txwi(struct mt76_dev *dev)
}
static struct mt76_txwi_cache *
+mt76_alloc_rxwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+
+ t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
+ if (!t)
+ return NULL;
+
+ t->ptr = NULL;
+ return t;
+}
+
+static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = NULL;
@@ -76,6 +89,22 @@ __mt76_get_txwi(struct mt76_dev *dev)
}
static struct mt76_txwi_cache *
+__mt76_get_rxwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = NULL;
+
+ spin_lock(&dev->wed_lock);
+ if (!list_empty(&dev->rxwi_cache)) {
+ t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+ list);
+ list_del(&t->list);
+ }
+ spin_unlock(&dev->wed_lock);
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
@@ -86,6 +115,18 @@ mt76_get_txwi(struct mt76_dev *dev)
return mt76_alloc_txwi(dev);
}
+struct mt76_txwi_cache *
+mt76_get_rxwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+
+ if (t)
+ return t;
+
+ return mt76_alloc_rxwi(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_get_rxwi);
+
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@@ -98,6 +139,18 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);
+void
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ if (!t)
+ return;
+
+ spin_lock(&dev->wed_lock);
+ list_add(&t->list, &dev->rxwi_cache);
+ spin_unlock(&dev->wed_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_rxwi);
+
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
@@ -113,6 +166,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
}
static void
+mt76_free_pending_rxwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+
+ local_bh_disable();
+ while ((t = __mt76_get_rxwi(dev)) != NULL) {
+ if (t->ptr)
+ skb_free_frag(t->ptr);
+ kfree(t);
+ }
+ local_bh_enable();
+}
+
+static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
Q_WRITE(dev, q, desc_base, q->desc_dma);
@@ -148,11 +215,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
u32 ctrl;
int i, idx = -1;
- if (txwi) {
- q->entry[q->head].txwi = DMA_DUMMY_DATA;
- q->entry[q->head].skip_buf0 = true;
- }
-
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
@@ -162,28 +224,48 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
desc = &q->desc[idx];
entry = &q->entry[idx];
- if (buf[0].skip_unmap)
- entry->skip_buf0 = true;
- entry->skip_buf1 = i == nbufs - 1;
-
- entry->dma_addr[0] = buf[0].addr;
- entry->dma_len[0] = buf[0].len;
-
- ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
- if (i < nbufs - 1) {
- entry->dma_addr[1] = buf[1].addr;
- entry->dma_len[1] = buf[1].len;
- buf1 = buf[1].addr;
- ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
- if (buf[1].skip_unmap)
- entry->skip_buf1 = true;
+ if ((q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+ struct mt76_txwi_cache *t = txwi;
+ int rx_token;
+
+ if (!t)
+ return -ENOMEM;
+
+ rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
+ buf[0].addr);
+ buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
+ MT_DMA_CTL_TO_HOST;
+ } else {
+ if (txwi) {
+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
+ q->entry[q->head].skip_buf0 = true;
+ }
+
+ if (buf[0].skip_unmap)
+ entry->skip_buf0 = true;
+ entry->skip_buf1 = i == nbufs - 1;
+
+ entry->dma_addr[0] = buf[0].addr;
+ entry->dma_len[0] = buf[0].len;
+
+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+ if (i < nbufs - 1) {
+ entry->dma_addr[1] = buf[1].addr;
+ entry->dma_len[1] = buf[1].len;
+ buf1 = buf[1].addr;
+ ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+ if (buf[1].skip_unmap)
+ entry->skip_buf1 = true;
+ }
+
+ if (i == nbufs - 1)
+ ctrl |= MT_DMA_CTL_LAST_SEC0;
+ else if (i == nbufs - 2)
+ ctrl |= MT_DMA_CTL_LAST_SEC1;
}
- if (i == nbufs - 1)
- ctrl |= MT_DMA_CTL_LAST_SEC0;
- else if (i == nbufs - 2)
- ctrl |= MT_DMA_CTL_LAST_SEC1;
-
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->info, cpu_to_le32(info));
@@ -272,33 +354,60 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
- int *len, u32 *info, bool *more)
+ int *len, u32 *info, bool *more, bool *drop)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
- dma_addr_t buf_addr;
- void *buf = e->buf;
- int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+ void *buf;
- buf_addr = e->dma_addr[0];
if (len) {
- u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
- *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
- *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+ u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
+ *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
+ *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
}
if (info)
*info = le32_to_cpu(desc->info);
- dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
- e->buf = NULL;
+ if ((q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+ u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
+ le32_to_cpu(desc->buf1));
+ struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+
+ if (!t)
+ return NULL;
+
+ dma_unmap_single(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
+
+ buf = t->ptr;
+ t->dma_addr = 0;
+ t->ptr = NULL;
+
+ mt76_put_rxwi(dev, t);
+
+ if (drop) {
+ u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
+
+ *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
+ MT_DMA_CTL_DROP));
+ }
+ } else {
+ buf = e->buf;
+ e->buf = NULL;
+ dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
+ }
return buf;
}
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
- int *len, u32 *info, bool *more)
+ int *len, u32 *info, bool *more, bool *drop)
{
int idx = q->tail;
@@ -314,7 +423,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- return mt76_dma_get_buf(dev, q, idx, len, info, more);
+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}
static int
@@ -441,14 +550,26 @@ free_skb:
return ret;
}
+static struct page_frag_cache *
+mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct page_frag_cache *rx_page = &q->rx_page;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if ((q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
+ rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
+#endif
+ return rx_page;
+}
+
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
- dma_addr_t addr;
- void *buf;
- int frames = 0;
+ struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
int len = SKB_WITH_OVERHEAD(q->buf_size);
- int offset = q->buf_offset;
+ int frames = 0, offset = q->buf_offset;
+ dma_addr_t addr;
if (!q->ndesc)
return 0;
@@ -456,9 +577,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
+ struct mt76_txwi_cache *t = NULL;
struct mt76_queue_buf qbuf;
+ void *buf = NULL;
- buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+ if ((q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+ t = mt76_get_rxwi(dev);
+ if (!t)
+ break;
+ }
+
+ buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
if (!buf)
break;
@@ -471,7 +601,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
qbuf.addr = addr + offset;
qbuf.len = len - offset;
qbuf.skip_unmap = false;
- mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+ mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
frames++;
}
@@ -502,7 +632,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
switch (type) {
case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
+ ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, false);
if (!ret)
q->wed_regs = wed->tx_ring[ring].reg_base;
break;
@@ -517,6 +647,11 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
if (!ret)
q->wed_regs = wed->txfree_ring.reg_base;
break;
+ case MT76_WED_Q_RX:
+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, false);
+ if (!ret)
+ q->wed_regs = wed->rx_ring[ring].reg_base;
+ break;
default:
ret = -EINVAL;
}
@@ -574,7 +709,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
spin_lock_bh(&q->lock);
do {
- buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+ buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
if (!buf)
break;
@@ -615,7 +750,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
- int len, bool more)
+ int len, bool more, u32 info)
{
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -635,7 +770,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
q->rx_head = NULL;
if (nr_frags < ARRAY_SIZE(shinfo->frags))
- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
else
dev_kfree_skb(skb);
}
@@ -656,6 +791,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
}
while (done < budget) {
+ bool drop = false;
u32 info;
if (check_ddone) {
@@ -666,10 +802,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
break;
}
- data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+ data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
+ &drop);
if (!data)
break;
+ if (drop)
+ goto free_frag;
+
if (q->rx_head)
data_len = q->buf_size;
else
@@ -682,7 +822,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
}
if (q->rx_head) {
- mt76_add_fragment(dev, q, data, len, more);
+ mt76_add_fragment(dev, q, data, len, more, info);
continue;
}
@@ -706,7 +846,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
continue;
}
- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
continue;
free_frag:
@@ -803,11 +943,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+
netif_napi_del(&dev->napi[i]);
- mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+ if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
+ mt76_dma_rx_cleanup(dev, q);
}
mt76_free_pending_txwi(dev);
+ mt76_free_pending_rxwi(dev);
if (mtk_wed_device_active(&dev->mmio.wed))
mtk_wed_device_detach(&dev->mmio.wed);
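
The new mt76_get_rxwi()/mt76_put_rxwi() helpers above reuse struct mt76_txwi_cache as a spinlock-protected free list for WED RX buffer descriptors: get falls back to an atomic allocation when the list is empty, put recycles the entry for later reuse. A generic sketch of that free-list cache pattern (names illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct buf_cache {
    	spinlock_t lock;
    	struct list_head free;		/* recycled entries */
    };

    struct buf_entry {
    	struct list_head list;
    	void *ptr;			/* attached buffer, if any */
    };

    static struct buf_entry *cache_get(struct buf_cache *c)
    {
    	struct buf_entry *e = NULL;

    	spin_lock(&c->lock);
    	if (!list_empty(&c->free)) {
    		e = list_first_entry(&c->free, struct buf_entry, list);
    		list_del(&e->list);
    	}
    	spin_unlock(&c->lock);

    	return e ? e : kzalloc(sizeof(*e), GFP_ATOMIC);	/* fall back to allocation */
    }

    static void cache_put(struct buf_cache *c, struct buf_entry *e)
    {
    	if (!e)
    		return;

    	spin_lock(&c->lock);
    	list_add(&e->list, &c->free);
    	spin_unlock(&c->lock);
    }
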
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index fdf786f975ea..53c6ce2528b2 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -15,6 +15,14 @@
#define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16)
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
+#define MT_DMA_CTL_TO_HOST BIT(8)
+#define MT_DMA_CTL_TO_HOST_A BIT(12)
+#define MT_DMA_CTL_DROP BIT(14)
+#define MT_DMA_CTL_TOKEN GENMASK(31, 16)
+
+#define MT_DMA_PPE_CPU_REASON GENMASK(15, 11)
+#define MT_DMA_PPE_ENTRY GENMASK(30, 16)
+#define MT_DMA_INFO_PPE_VLD BIT(31)
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 6de13d641438..fc608b369b3c 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -443,8 +443,12 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
- ieee80211_hw_set(hw, TX_AMSDU);
- ieee80211_hw_set(hw, TX_FRAG_LIST);
+
+ if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ }
+
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
@@ -568,6 +572,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
spin_lock_init(&dev->status_lock);
+ spin_lock_init(&dev->wed_lock);
mutex_init(&dev->mutex);
init_waitqueue_head(&dev->tx_wait);
@@ -590,9 +595,13 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
+ spin_lock_init(&dev->rx_token_lock);
+ idr_init(&dev->rx_token);
+
INIT_LIST_HEAD(&dev->wcid_list);
INIT_LIST_HEAD(&dev->txwi_cache);
+ INIT_LIST_HEAD(&dev->rxwi_cache);
dev->token_size = dev->drv->token_size;
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
@@ -947,14 +956,12 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
-static int
-mt76_rx_signal(struct mt76_rx_status *status)
+int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
- s8 *chain_signal = status->chain_signal;
int signal = -128;
u8 chains;
- for (chains = status->chains; chains; chains >>= 1, chain_signal++) {
+ for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
int cur, diff;
cur = *chain_signal;
@@ -976,6 +983,7 @@ mt76_rx_signal(struct mt76_rx_status *status)
return signal;
}
+EXPORT_SYMBOL(mt76_rx_signal);
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
@@ -1005,7 +1013,7 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->ampdu_reference = mstat.ampdu_ref;
status->device_timestamp = mstat.timestamp;
status->mactime = mstat.timestamp;
- status->signal = mt76_rx_signal(&mstat);
+ status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
if (status->signal <= -128)
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
@@ -1289,7 +1297,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
mt76_check_sta(dev, skb);
- mt76_rx_aggr_reorder(skb, &frames);
+ if (mtk_wed_device_active(&dev->mmio.wed))
+ __skb_queue_tail(&frames, skb);
+ else
+ mt76_rx_aggr_reorder(skb, &frames);
}
mt76_rx_complete(dev, &frames, napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 87db9498dea4..32a77a0ae9da 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -35,6 +35,7 @@
FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
+#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
struct mt76_dev;
@@ -56,6 +57,7 @@ enum mt76_bus_type {
enum mt76_wed_type {
MT76_WED_Q_TX,
MT76_WED_Q_TXFREE,
+ MT76_WED_Q_RX,
};
struct mt76_bus_ops {
@@ -271,9 +273,15 @@ struct mt76_sta_stats {
u64 tx_nss[4]; /* 1, 2, 3, 4 */
u64 tx_mcs[16]; /* mcs idx */
u64 tx_bytes;
+ /* WED TX */
u32 tx_packets;
u32 tx_retries;
u32 tx_failed;
+ /* WED RX */
+ u64 rx_bytes;
+ u32 rx_packets;
+ u32 rx_errors;
+ u32 rx_drops;
};
enum mt76_wcid_flags {
@@ -339,7 +347,10 @@ struct mt76_txwi_cache {
struct list_head list;
dma_addr_t dma_addr;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ void *ptr;
+ };
};
struct mt76_rx_tid {
@@ -415,6 +426,7 @@ struct mt76_hw_cap {
#define MT_DRV_SW_RX_AIRTIME BIT(2)
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
+#define MT_DRV_AMSDU_OFFLOAD BIT(5)
struct mt76_driver_ops {
u32 drv_flags;
@@ -438,7 +450,7 @@ struct mt76_driver_ops {
bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *info);
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
@@ -470,19 +482,6 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
-struct mt76_rate_power {
- union {
- struct {
- s8 cck[4];
- s8 ofdm[8];
- s8 stbc[10];
- s8 ht[16];
- s8 vht[10];
- };
- s8 all[48];
- };
-};
-
/* addr req mask */
#define MT_VEND_TYPE_EEPROM BIT(31)
#define MT_VEND_TYPE_CFG BIT(30)
@@ -705,6 +704,8 @@ struct mt76_phy {
enum mt76_dfs_state dfs_state;
ktime_t survey_time;
+ u32 aggr_stats[32];
+
struct mt76_hw_cap cap;
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
@@ -738,6 +739,7 @@ struct mt76_dev {
struct ieee80211_hw *hw;
+ spinlock_t wed_lock;
spinlock_t lock;
spinlock_t cc_lock;
@@ -764,6 +766,7 @@ struct mt76_dev {
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
+ struct list_head rxwi_cache;
struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
@@ -778,6 +781,10 @@ struct mt76_dev {
u16 token_count;
u16 token_size;
+ spinlock_t rx_token_lock;
+ struct idr rx_token;
+ u16 rx_token_size;
+
wait_queue_head_t tx_wait;
/* spinclock used to protect wcid pktid linked list */
spinlock_t status_lock;
@@ -793,8 +800,6 @@ struct mt76_dev {
u32 rev;
- u32 aggr_stats[32];
-
struct tasklet_struct pre_tbtt_tasklet;
int beacon_int;
u8 beacon_mask;
@@ -802,8 +807,6 @@ struct mt76_dev {
struct debugfs_blob_wrapper eeprom;
struct debugfs_blob_wrapper otp;
- struct mt76_rate_power rate_power;
-
char alpha2[3];
enum nl80211_dfs_regions region;
@@ -1107,8 +1110,9 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
static const u8 nss_delta[4] = { 0, 6, 9, 12 };
+ u8 idx = nss - 1;
- return nss_delta[nss - 1];
+ return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
}
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
@@ -1163,6 +1167,7 @@ void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
+int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
@@ -1260,6 +1265,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
}
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
@@ -1404,6 +1411,9 @@ struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+ struct mt76_txwi_cache *r, dma_addr_t phys);
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index f52165dff422..3967f2f05774 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -85,7 +85,7 @@ mt7603_ampdu_stat_show(struct seq_file *file, void *data)
bound[i], bound[i + 1]);
seq_puts(file, "\nCount: ");
for (i = 0; i < ARRAY_SIZE(bound); i++)
- seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_printf(file, "%8d | ", dev->mphy.aggr_stats[i]);
seq_puts(file, "\n");
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index f9e5857850e7..03ba11a61c90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -69,7 +69,7 @@ free:
}
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *info)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 49a511ae8161..70a7f84af028 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -39,7 +39,7 @@ void mt7603_mac_reset_counters(struct mt7603_dev *dev)
for (i = 0; i < 2; i++)
mt76_rr(dev, MT_TX_AGG_CNT(i));
- memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+ memset(dev->mphy.aggr_stats, 0, sizeof(dev->mphy.aggr_stats));
}
void mt7603_mac_set_timing(struct mt7603_dev *dev)
@@ -1827,8 +1827,8 @@ void mt7603_mac_work(struct work_struct *work)
for (i = 0, idx = 0; i < 2; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
- dev->mt76.aggr_stats[idx++] += val & 0xffff;
- dev->mt76.aggr_stats[idx++] += val >> 16;
+ dev->mphy.aggr_stats[idx++] += val & 0xffff;
+ dev->mphy.aggr_stats[idx++] += val >> 16;
}
if (dev->mphy.mac_work_count == 10)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 0fd46d907638..7c3be596da09 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -244,7 +244,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *info);
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index c26b45a09923..2a6d317db5e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -278,7 +278,6 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
{
struct mt7615_dev *dev = file->private;
u32 reg = is_mt7663(&dev->mt76) ? MT_MIB_ARNG(0) : MT_AGG_ASRCR0;
- bool ext_phy = phy != &dev->phy;
int bound[7], i, range;
if (!phy)
@@ -292,7 +291,7 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
for (i = 0; i < 3; i++)
bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
- seq_printf(file, "\nPhy %d\n", ext_phy);
+ seq_printf(file, "\nPhy %d\n", phy != &dev->phy);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@@ -300,9 +299,8 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
bound[i], bound[i + 1]);
seq_puts(file, "\nCount: ");
- range = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
- seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + range]);
+ seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
seq_puts(file, "\n");
seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 2ce1705c0f43..a95602473359 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -107,9 +107,9 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
return &sta->vif->sta.wcid;
}
-void mt7615_mac_reset_counters(struct mt7615_dev *dev)
+void mt7615_mac_reset_counters(struct mt7615_phy *phy)
{
- struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+ struct mt7615_dev *dev = phy->dev;
int i;
for (i = 0; i < 4; i++) {
@@ -117,10 +117,8 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
mt76_rr(dev, MT_TX_AGG_CNT(1, i));
}
- memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
- dev->mt76.phy.survey_time = ktime_get_boottime();
- if (mphy_ext)
- mphy_ext->survey_time = ktime_get_boottime();
+ memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
+ phy->mt76->survey_time = ktime_get_boottime();
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
@@ -1177,6 +1175,21 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
+void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ u32 addr;
+
+ addr = mt7615_mac_wtbl_addr(dev, mvif->sta.wcid.idx) + 3 * 4;
+
+ if (enable)
+ mt76_set(dev, addr, MT_WTBL_W3_RTS);
+ else
+ mt76_clear(dev, addr, MT_WTBL_W3_RTS);
+}
+EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
+
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
@@ -1653,7 +1666,7 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
EXPORT_SYMBOL_GPL(mt7615_rx_check);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
@@ -1999,7 +2012,7 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
- int i, aggr;
+ int i, aggr = 0;
u32 val, val2;
mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
@@ -2013,7 +2026,6 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
mib->aggr_per = 1000 * (val - val2) / val;
}
- aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
@@ -2026,8 +2038,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
val);
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- dev->mt76.aggr_stats[aggr++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr++] += val >> 16;
+ phy->mt76->aggr_stats[aggr++] += val & 0xffff;
+ phy->mt76->aggr_stats[aggr++] += val >> 16;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 8d4733f87cda..ab4c1b4478aa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -83,7 +83,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
if (!running)
- mt7615_mac_reset_counters(dev);
+ mt7615_mac_reset_counters(phy);
out:
mt7615_mutex_release(dev);
@@ -320,7 +320,7 @@ int mt7615_set_channel(struct mt7615_phy *phy)
if (ret)
goto out;
- mt7615_mac_reset_counters(dev);
+ mt7615_mac_reset_counters(phy);
phy->noise = 0;
phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
@@ -572,6 +572,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt7615_mac_enable_rtscts(dev, vif, info->use_cts_prot);
+
if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
mt7615_mcu_add_bss_info(phy, vif, NULL, true);
mt7615_mcu_sta_add(phy, vif, NULL, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 3dac76e6df4d..83f30305414d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -1119,7 +1119,7 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
return mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
- enable);
+ enable, NULL);
}
static inline int
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 060d52c81d9e..087d4886162e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -469,10 +469,12 @@ void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
void mt7615_update_channel(struct mt76_phy *mphy);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
-void mt7615_mac_reset_counters(struct mt7615_dev *dev);
+void mt7615_mac_reset_counters(struct mt7615_phy *phy);
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable);
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
+void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
@@ -511,7 +513,7 @@ void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_token_put(struct mt7615_dev *dev);
bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *info);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 6712ad9faeaa..fa1b9b26b399 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -446,6 +446,8 @@ enum mt7615_reg_base {
#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
+#define MT_WTBL_W3_RTS BIT(22)
+
#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
#define MT_WTBL_W5_SHORT_GI_40 BIT(9)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 635192c878cb..8ba883b03e50 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -187,6 +187,11 @@ static inline bool is_mt7986(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7986;
}
+static inline bool is_mt7996(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7990;
+}
+
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
@@ -261,6 +266,17 @@ mt76_connac_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
return (void *)(txwi + MT_TXD_SIZE);
}
+static inline u8 mt76_connac_spe_idx(u8 antenna_mask)
+{
+ static const u8 ant_to_spe[] = {0, 0, 1, 0, 3, 2, 4, 0,
+ 9, 8, 6, 10, 16, 12, 18, 0};
+
+ if (antenna_mask >= sizeof(ant_to_spe))
+ return 0;
+
+ return ant_to_spe[antenna_mask];
+}
+
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 34ac3d81a510..fd60123fb284 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -417,9 +417,6 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
if (ieee80211_is_beacon(fc)) {
txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
- if (!is_mt7921(dev))
- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
- 0x18));
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
@@ -550,6 +547,14 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
val |= FIELD_PREP(MT_TXD6_TX_RATE, rate);
txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+
+ if (!is_mt7921(dev)) {
+ u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask);
+
+ if (!spe_idx)
+ spe_idx = 24 + phy_idx;
+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, spe_idx));
+ }
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
@@ -562,7 +567,7 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct mt76_phy *mphy;
struct rate_info rate = {};
bool cck = false;
- u32 txrate, txs, mode;
+ u32 txrate, txs, mode, stbc;
txs = le32_to_cpu(txs_data[0]);
@@ -582,6 +587,10 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
+ stbc = FIELD_GET(MT_TX_RATE_STBC, txrate);
+
+ if (stbc && rate.nss > 1)
+ rate.nss >>= 1;
if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
stats->tx_nss[rate.nss - 1]++;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 011fc9729b38..5a047e630860 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -65,7 +65,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
int cmd;
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
- (is_mt7921(dev) && addr == 0x900000))
+ (is_mt7921(dev) && addr == 0x900000) ||
+ (is_mt7996(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@@ -744,6 +745,39 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->pkt_ext = 2;
}
+static void
+mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct sta_rec_he_v2 *he;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_V2, sizeof(*he));
+
+ he = (struct sta_rec_he_v2 *)tlv;
+ memcpy(he->he_phy_cap, elem->phy_cap_info, sizeof(he->he_phy_cap));
+ memcpy(he->he_mac_cap, elem->mac_cap_info, sizeof(he->he_mac_cap));
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (elem->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+
+ he->max_nss_mcs[CMD_HE_MCS_BW160] =
+ he_cap->he_mcs_nss_supp.rx_mcs_160;
+ fallthrough;
+ default:
+ he->max_nss_mcs[CMD_HE_MCS_BW80] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80;
+ break;
+ }
+
+ he->pkt_ext = IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US;
+}
+
static u8
mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
enum nl80211_band band, struct ieee80211_sta *sta)
@@ -838,6 +872,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
/* starec he */
if (sta->deflink.he_cap.has_he) {
mt76_connac_mcu_sta_he_tlv(skb, sta);
+ mt76_connac_mcu_sta_he_tlv_v2(skb, sta);
if (band == NL80211_BAND_6GHZ &&
sta_state == MT76_STA_INFO_STATE_ASSOC) {
struct sta_rec_he_6g_capa *he_6g_capa;
@@ -1184,6 +1219,16 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
+int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ if (!mtk_wed_device_active(&dev->mmio.wed))
+ return 0;
+
+ return mtk_wed_device_update_msg(&dev->mmio.wed, WED_WO_STA_REC,
+ skb->data, skb->len);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_wed_update);
+
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx)
@@ -1209,6 +1254,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
+ ret = mt76_connac_mcu_sta_wed_update(dev, skb);
+ if (ret)
+ return ret;
+
ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
return ret;
@@ -1219,6 +1268,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
+ ret = mt76_connac_mcu_sta_wed_update(dev, skb);
+ if (ret)
+ return ret;
+
return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
@@ -1313,13 +1366,10 @@ mt76_connac_mcu_uni_bss_he_tlv(struct mt76_phy *phy, struct ieee80211_vif *vif,
he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
}
-int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
- struct ieee80211_vif *vif,
- struct mt76_wcid *wcid,
- bool enable)
+int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+ struct ieee80211_chanctx_conf *ctx)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &phy->chandef;
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
enum nl80211_band band = chandef->chan->band;
struct mt76_dev *mdev = phy->dev;
@@ -1328,34 +1378,6 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
u8 bss_idx;
u8 pad[3];
} __packed hdr;
- struct mt76_connac_bss_basic_tlv basic;
- struct mt76_connac_bss_qos_tlv qos;
- } basic_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .basic = {
- .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
- .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
- .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
- .dtim_period = vif->bss_conf.dtim_period,
- .omac_idx = mvif->omac_idx,
- .band_idx = mvif->band_idx,
- .wmm_idx = mvif->wmm_idx,
- .active = true, /* keep bss deactivated */
- .phymode = mt76_connac_get_phy_mode(phy, vif, band, NULL),
- },
- .qos = {
- .tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
- .len = cpu_to_le16(sizeof(struct mt76_connac_bss_qos_tlv)),
- .qos = vif->bss_conf.qos,
- },
- };
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
struct rlm_tlv {
__le16 tag;
__le16 len;
@@ -1388,6 +1410,82 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
.band = band,
},
};
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ rlm_req.rlm.bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rlm_req.rlm.bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ rlm_req.rlm.bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ rlm_req.rlm.bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ rlm_req.rlm.bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ rlm_req.rlm.bw = CMD_CBW_10MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ rlm_req.rlm.bw = CMD_CBW_20MHZ;
+ rlm_req.rlm.ht_op_info = 0;
+ break;
+ }
+
+ if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 1; /* SCA */
+ else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 3; /* SCB */
+
+ return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req,
+ sizeof(rlm_req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_set_chctx);
+
+int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
+ struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid,
+ bool enable,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_dev *mdev = phy->dev;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_bss_basic_tlv basic;
+ struct mt76_connac_bss_qos_tlv qos;
+ } basic_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+ .active = true, /* keep bss deactivated */
+ .phymode = mt76_connac_get_phy_mode(phy, vif, band, NULL),
+ },
+ .qos = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_qos_tlv)),
+ .qos = vif->bss_conf.qos,
+ },
+ };
int err, conn_type;
u8 idx, basic_phy;
@@ -1474,40 +1572,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
return err;
}
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_40:
- rlm_req.rlm.bw = CMD_CBW_40MHZ;
- break;
- case NL80211_CHAN_WIDTH_80:
- rlm_req.rlm.bw = CMD_CBW_80MHZ;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- rlm_req.rlm.bw = CMD_CBW_8080MHZ;
- break;
- case NL80211_CHAN_WIDTH_160:
- rlm_req.rlm.bw = CMD_CBW_160MHZ;
- break;
- case NL80211_CHAN_WIDTH_5:
- rlm_req.rlm.bw = CMD_CBW_5MHZ;
- break;
- case NL80211_CHAN_WIDTH_10:
- rlm_req.rlm.bw = CMD_CBW_10MHZ;
- break;
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- default:
- rlm_req.rlm.bw = CMD_CBW_20MHZ;
- rlm_req.rlm.ht_op_info = 0;
- break;
- }
-
- if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 1; /* SCA */
- else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 3; /* SCB */
-
- return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req,
- sizeof(rlm_req), true);
+ return mt76_connac_mcu_uni_set_chctx(phy, mvif, ctx);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
@@ -1525,6 +1590,9 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_connac_hw_scan_req *req;
struct sk_buff *skb;
+ if (test_bit(MT76_HW_SCANNING, &phy->state))
+ return -EBUSY;
+
skb = mt76_mcu_msg_alloc(mdev, NULL, sizeof(*req));
if (!skb)
return -ENOMEM;
@@ -2646,6 +2714,10 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
if (ret)
return ret;
+ ret = mt76_connac_mcu_sta_wed_update(dev, skb);
+ if (ret)
+ return ret;
+
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
@@ -2834,6 +2906,9 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
+ if (region->feature_set & FW_FEATURE_NON_DL)
+ goto next;
+
if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
override = addr;
@@ -2850,6 +2925,7 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
return err;
}
+next:
offset += len;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 718f427d8f6b..f1e942b9a887 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -63,7 +63,7 @@ struct mt76_connac2_mcu_txd {
} __packed __aligned(4);
/**
- * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for firmware v3
+ * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for connac2 and connac3
* @txd: hardware descriptor
* @len: total length not including txd
* @cid: command identifier
@@ -121,11 +121,13 @@ struct mt76_connac2_mcu_rxd {
u8 eid;
u8 seq;
- u8 rsv[2];
-
+ u8 option;
+ u8 rsv;
u8 ext_eid;
u8 rsv1[2];
u8 s2d_index;
+
+ u8 tlv[0];
};
struct mt76_connac2_patch_hdr {
@@ -354,6 +356,16 @@ struct sta_rec_he {
u8 rsv2[2];
} __packed;
+struct sta_rec_he_v2 {
+ __le16 tag;
+ __le16 len;
+ u8 he_mac_cap[6];
+ u8 he_phy_cap[11];
+ u8 pkt_ext;
+ /* 0: BW80, 1: BW160, 2: BW8080 */
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+} __packed;
+
struct sta_rec_amsdu {
__le16 tag;
__le16 len;
@@ -391,7 +403,8 @@ struct sta_rec_phy {
u8 ampdu;
u8 rts_policy;
u8 rcpi;
- u8 rsv[2];
+ u8 max_ampdu_len; /* connac3 */
+ u8 rsv[1];
} __packed;
struct sta_rec_he_6g_capa {
@@ -452,8 +465,8 @@ struct sta_rec_bf {
u8 ibf_dbw;
u8 ibf_ncol;
u8 ibf_nrow;
- u8 nrow_bw160;
- u8 ncol_bw160;
+ u8 nrow_gt_bw80;
+ u8 ncol_gt_bw80;
u8 ru_start_idx;
u8 ru_end_idx;
@@ -580,7 +593,7 @@ struct sta_rec_ra_fixed {
struct sta_phy phy;
- u8 spe_en;
+ u8 spe_idx;
u8 short_preamble;
u8 is_5g;
u8 mmps_mode;
@@ -779,6 +792,9 @@ enum {
STA_REC_BFEE,
STA_REC_PHY = 0x15,
STA_REC_HE_6G = 0x17,
+ STA_REC_HE_V2 = 0x19,
+ STA_REC_HDRT = 0x28,
+ STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
};
@@ -946,6 +962,9 @@ enum {
DEV_INFO_MAX_NUM
};
+#define MCU_UNI_CMD_EVENT BIT(1)
+#define MCU_UNI_CMD_UNSOLICITED_EVENT BIT(2)
+
/* event table */
enum {
MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
@@ -981,6 +1000,17 @@ enum {
MCU_EXT_EVENT_MURU_CTRL = 0x9f,
};
+/* unified event table */
+enum {
+ MCU_UNI_EVENT_RESULT = 0x01,
+ MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04,
+ MCU_UNI_EVENT_IE_COUNTDOWN = 0x09,
+ MCU_UNI_EVENT_RDD_REPORT = 0x11,
+};
+
+#define MCU_UNI_CMD_EVENT BIT(1)
+#define MCU_UNI_CMD_UNSOLICITED_EVENT BIT(2)
+
enum {
MCU_Q_QUERY,
MCU_Q_SET,
@@ -1063,10 +1093,11 @@ enum {
#define MCU_CMD_ACK BIT(0)
#define MCU_CMD_UNI BIT(1)
-#define MCU_CMD_QUERY BIT(2)
+#define MCU_CMD_SET BIT(2)
#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \
- MCU_CMD_QUERY)
+ MCU_CMD_SET)
+#define MCU_CMD_UNI_QUERY_ACK (MCU_CMD_ACK | MCU_CMD_UNI)
#define __MCU_CMD_FIELD_ID GENMASK(7, 0)
#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
@@ -1074,6 +1105,7 @@ enum {
#define __MCU_CMD_FIELD_UNI BIT(17)
#define __MCU_CMD_FIELD_CE BIT(18)
#define __MCU_CMD_FIELD_WA BIT(19)
+#define __MCU_CMD_FIELD_WM BIT(20)
#define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, \
MCU_CMD_##_t)
@@ -1095,6 +1127,16 @@ enum {
FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
MCU_WA_PARAM_CMD_##_t))
+#define MCU_WM_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \
+ __MCU_CMD_FIELD_WM)
+#define MCU_WM_UNI_CMD_QUERY(_t) (MCU_UNI_CMD(_t) | \
+ __MCU_CMD_FIELD_QUERY | \
+ __MCU_CMD_FIELD_WM)
+#define MCU_WA_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \
+ __MCU_CMD_FIELD_WA)
+#define MCU_WMWA_UNI_CMD(_t) (MCU_WM_UNI_CMD(_t) | \
+ __MCU_CMD_FIELD_WA)
+
enum {
MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
@@ -1148,10 +1190,33 @@ enum {
MCU_UNI_CMD_DEV_INFO_UPDATE = 0x01,
MCU_UNI_CMD_BSS_INFO_UPDATE = 0x02,
MCU_UNI_CMD_STA_REC_UPDATE = 0x03,
+ MCU_UNI_CMD_EDCA_UPDATE = 0x04,
MCU_UNI_CMD_SUSPEND = 0x05,
MCU_UNI_CMD_OFFLOAD = 0x06,
MCU_UNI_CMD_HIF_CTRL = 0x07,
+ MCU_UNI_CMD_BAND_CONFIG = 0x08,
+ MCU_UNI_CMD_REPT_MUAR = 0x09,
+ MCU_UNI_CMD_WSYS_CONFIG = 0x0b,
+ MCU_UNI_CMD_REG_ACCESS = 0x0d,
+ MCU_UNI_CMD_POWER_CREL = 0x0f,
+ MCU_UNI_CMD_RX_HDR_TRANS = 0x12,
+ MCU_UNI_CMD_SER = 0x13,
+ MCU_UNI_CMD_TWT = 0x14,
+ MCU_UNI_CMD_RDD_CTRL = 0x19,
+ MCU_UNI_CMD_GET_MIB_INFO = 0x22,
MCU_UNI_CMD_SNIFFER = 0x24,
+ MCU_UNI_CMD_SR = 0x25,
+ MCU_UNI_CMD_ROC = 0x27,
+ MCU_UNI_CMD_TXPOWER = 0x2b,
+ MCU_UNI_CMD_EFUSE_CTRL = 0x2d,
+ MCU_UNI_CMD_RA = 0x2f,
+ MCU_UNI_CMD_MURU = 0x31,
+ MCU_UNI_CMD_BF = 0x33,
+ MCU_UNI_CMD_CHANNEL_SWITCH = 0x34,
+ MCU_UNI_CMD_THERMAL = 0x35,
+ MCU_UNI_CMD_VOW = 0x37,
+ MCU_UNI_CMD_RRO = 0x57,
+ MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58,
};
enum {
@@ -1201,14 +1266,23 @@ enum {
enum {
UNI_BSS_INFO_BASIC = 0,
+ UNI_BSS_INFO_RA = 1,
UNI_BSS_INFO_RLM = 2,
UNI_BSS_INFO_BSS_COLOR = 4,
UNI_BSS_INFO_HE_BASIC = 5,
UNI_BSS_INFO_BCN_CONTENT = 7,
+ UNI_BSS_INFO_BCN_CSA = 8,
+ UNI_BSS_INFO_BCN_BCC = 9,
+ UNI_BSS_INFO_BCN_MBSSID = 10,
+ UNI_BSS_INFO_RATE = 11,
UNI_BSS_INFO_QBSS = 15,
+ UNI_BSS_INFO_SEC = 16,
+ UNI_BSS_INFO_TXCMD = 18,
UNI_BSS_INFO_UAPSD = 19,
UNI_BSS_INFO_PS = 21,
UNI_BSS_INFO_BCNFT = 22,
+ UNI_BSS_INFO_OFFLOAD = 25,
+ UNI_BSS_INFO_MLD = 26,
};
enum {
@@ -1736,10 +1810,14 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
int cmd, bool enable, bool tx);
+int mt76_connac_mcu_uni_set_chctx(struct mt76_phy *phy,
+ struct mt76_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
- bool enable);
+ bool enable,
+ struct ieee80211_chanctx_conf *ctx);
int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info);
void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
@@ -1813,6 +1891,7 @@ int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
+int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb);
int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
const char *fw_wa);
int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index da2ca2563ac9..c3a392a1a659 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -151,7 +151,7 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
struct ieee80211_channel *chan,
- struct mt76_rate_power *t)
+ struct mt76x02_rate_power *t)
{
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
u16 val, addr;
@@ -179,31 +179,19 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
/* ht-vht mcs 1ss 0, 1, 2, 3 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
val = mt76x02_eeprom_get(dev, addr);
- t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
- t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
+ t->ht[0] = t->ht[1] = s6_to_s8(val);
+ t->ht[2] = t->ht[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
val = mt76x02_eeprom_get(dev, addr);
- t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
- t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8);
-
- /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
- addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
- val = mt76x02_eeprom_get(dev, addr);
- t->stbc[0] = t->stbc[1] = s6_to_s8(val);
- t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
-
- /* ht-vht mcs 1ss 4, 5, 6 stbc */
- addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
- val = mt76x02_eeprom_get(dev, addr);
- t->stbc[4] = t->stbc[5] = s6_to_s8(val);
- t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
+ t->ht[4] = t->ht[5] = s6_to_s8(val);
+ t->ht[6] = t->ht[7] = s6_to_s8(val >> 8);
/* vht mcs 8, 9 5GHz */
val = mt76x02_eeprom_get(dev, 0x12c);
- t->vht[8] = s6_to_s8(val);
- t->vht[9] = s6_to_s8(val >> 8);
+ t->vht[0] = s6_to_s8(val);
+ t->vht[1] = s6_to_s8(val >> 8);
delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
mt76x02_add_rate_power_offset(t, delta);
@@ -235,7 +223,7 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev,
data = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER);
else
data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
- target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
+ target_power = (data & 0xff) - dev->rate_power.ofdm[7];
*tp = target_power + mt76x0_get_delta(dev);
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index 15540ce8db87..08f1b10bf3ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -19,7 +19,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev);
void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
struct ieee80211_channel *chan,
- struct mt76_rate_power *t);
+ struct mt76x02_rate_power *t);
void mt76x0_get_power_info(struct mt76x02_dev *dev,
struct ieee80211_channel *chan, s8 *tp);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 66d47c70111a..6257460f8de5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -217,7 +217,7 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband)
{
struct ieee80211_channel *chan;
- struct mt76_rate_power t;
+ struct mt76x02_rate_power t;
s8 tp;
int i;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index e91c314cdfac..6c6c8ada7943 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -595,10 +595,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
case 0:
/* cck rates */
tx_rate = (info[0] & 0x60) >> 5;
- if (tx_rate > 3)
- return -EINVAL;
-
- *target_power = cur_power + dev->mt76.rate_power.cck[tx_rate];
+ *target_power = cur_power + dev->rate_power.cck[tx_rate];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, tx_rate);
break;
case 1: {
@@ -635,7 +632,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
return -EINVAL;
}
- *target_power = cur_power + dev->mt76.rate_power.ofdm[index];
+ *target_power = cur_power + dev->rate_power.ofdm[index];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, index + 4);
break;
}
@@ -645,7 +642,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
if (tx_rate > 9)
return -EINVAL;
- *target_power = cur_power + dev->mt76.rate_power.vht[tx_rate];
+ *target_power = cur_power + dev->rate_power.vht[tx_rate];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
break;
default:
@@ -654,7 +651,7 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
if (tx_rate > 9)
return -EINVAL;
- *target_power = cur_power + dev->mt76.rate_power.ht[tx_rate];
+ *target_power = cur_power + dev->rate_power.ht[tx_rate];
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
break;
}
@@ -841,7 +838,7 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
{
- struct mt76_rate_power *t = &dev->mt76.rate_power;
+ struct mt76x02_rate_power *t = &dev->rate_power;
s8 info;
mt76x0_get_tx_power_per_rate(dev, dev->mphy.chandef.chan, t);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 50eaeff11af3..4cd63bacd742 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -72,6 +72,18 @@ struct mt76x02_beacon_ops {
#define mt76x02_pre_tbtt_enable(dev, enable) \
(dev)->beacon_ops->pre_tbtt_enable(dev, enable)
+struct mt76x02_rate_power {
+ union {
+ struct {
+ s8 cck[4];
+ s8 ofdm[8];
+ s8 ht[16];
+ s8 vht[2];
+ };
+ s8 all[30];
+ };
+};
+
struct mt76x02_dev {
union { /* must be first */
struct mt76_dev mt76;
@@ -107,6 +119,8 @@ struct mt76x02_dev {
u8 beacon_hang_check;
u8 mcu_timeout;
+ struct mt76x02_rate_power rate_power;
+
struct mt76x02_calibration cal;
int txpower_conf;
@@ -174,7 +188,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *info);
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index c4fe1c436aaa..8ce4bf44733d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -20,7 +20,7 @@ mt76x02_ampdu_stat_show(struct seq_file *file, void *data)
seq_puts(file, "Count: ");
for (j = 0; j < 8; j++)
seq_printf(file, "%8d | ",
- dev->mt76.aggr_stats[i * 8 + j]);
+ dev->mphy.aggr_stats[i * 8 + j]);
seq_puts(file, "\n");
seq_puts(file, "--------");
for (j = 0; j < 8; j++)
@@ -114,6 +114,21 @@ mt76_edcca_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
"%lld\n");
+static int mt76x02_read_rate_txpower(struct seq_file *s, void *data)
+{
+ struct mt76x02_dev *dev = dev_get_drvdata(s->private);
+
+ mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
+ ARRAY_SIZE(dev->rate_power.cck));
+ mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
+ ARRAY_SIZE(dev->rate_power.ofdm));
+ mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
+ ARRAY_SIZE(dev->rate_power.ht));
+ mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
+ ARRAY_SIZE(dev->rate_power.vht));
+ return 0;
+}
+
void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
@@ -133,6 +148,8 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rate_txpower", dir,
+ mt76x02_read_rate_txpower);
debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
index 99941a4700f3..13fa70853b0d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -62,8 +62,6 @@ enum mt76x02_eeprom_field {
MT_EE_TX_POWER_HT_MCS4 = 0x0a8,
MT_EE_TX_POWER_HT_MCS8 = 0x0aa,
MT_EE_TX_POWER_HT_MCS12 = 0x0ac,
- MT_EE_TX_POWER_VHT_MCS0 = 0x0ba,
- MT_EE_TX_POWER_VHT_MCS4 = 0x0bc,
MT_EE_TX_POWER_VHT_MCS8 = 0x0be,
MT_EE_2G_TARGET_POWER = 0x0d0,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 93d96739f802..d3f74473e6fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -25,7 +25,7 @@ void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
for (i = 0; i < 16; i++)
mt76_rr(dev, MT_TX_STAT_FIFO);
- memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+ memset(dev->mphy.aggr_stats, 0, sizeof(dev->mphy.aggr_stats));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
@@ -1191,8 +1191,8 @@ void mt76x02_mac_work(struct work_struct *work)
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
- dev->mt76.aggr_stats[idx++] += val & 0xffff;
- dev->mt76.aggr_stats[idx++] += val >> 16;
+ dev->mphy.aggr_stats[idx++] += val & 0xffff;
+ dev->mphy.aggr_stats[idx++] += val >> 16;
}
mt76x02_check_mac_err(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index 2e53b0c1afdd..cbe7e6f0c29a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -59,7 +59,7 @@ mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
return val;
}
-int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
+int mt76x02_get_max_rate_power(struct mt76x02_rate_power *r)
{
s8 ret = 0;
int i;
@@ -71,7 +71,7 @@ int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
}
EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
-void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
+void mt76x02_limit_rate_power(struct mt76x02_rate_power *r, int limit)
{
int i;
@@ -81,7 +81,7 @@ void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
}
EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
-void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+void mt76x02_add_rate_power_offset(struct mt76x02_rate_power *r, int offset)
{
int i;
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
{
- struct mt76_rate_power *t = &dev->mt76.rate_power;
+ struct mt76x02_rate_power *t = &dev->rate_power;
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
@@ -107,17 +107,17 @@ void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
t->ht[10]));
mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
- t->stbc[2]));
+ mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->ht[0],
+ t->ht[2]));
mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
+ mt76x02_tx_power_mask(t->ht[4], t->ht[6], 0, 0));
mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
- t->vht[9]));
+ mt76x02_tx_power_mask(t->ofdm[7], t->vht[0], t->ht[7],
+ t->vht[1]));
mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
+ mt76x02_tx_power_mask(t->ht[14], 0, t->vht[0], t->vht[1]));
mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
+ mt76x02_tx_power_mask(t->ht[7], 0, t->vht[0], t->vht[1]));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index 1def25bf735a..84d8a6138b3e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -34,10 +34,10 @@ mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
}
}
-void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
+void mt76x02_add_rate_power_offset(struct mt76x02_rate_power *r, int offset);
void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
-void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
-int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x02_limit_rate_power(struct mt76x02_rate_power *r, int limit);
+int mt76x02_get_max_rate_power(struct mt76x02_rate_power *r);
void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 96fdf423a348..d8bc4ae185f5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -33,7 +33,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
EXPORT_SYMBOL_GPL(mt76x02_tx);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
void *rxwi = skb->data;
@@ -62,23 +62,23 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
if (mcs == 8 || mcs == 9) {
- max_txpwr = dev->mt76.rate_power.vht[8];
+ max_txpwr = dev->rate_power.vht[0];
} else {
u8 nss, idx;
nss = ieee80211_rate_get_vht_nss(rate);
idx = ((nss - 1) << 3) + mcs;
- max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
+ max_txpwr = dev->rate_power.ht[idx & 0xf];
}
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
+ max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
} else {
enum nl80211_band band = dev->mphy.chandef.chan->band;
if (band == NL80211_BAND_2GHZ) {
const struct ieee80211_rate *r;
struct wiphy *wiphy = dev->mt76.hw->wiphy;
- struct mt76_rate_power *rp = &dev->mt76.rate_power;
+ struct mt76x02_rate_power *rp = &dev->rate_power;
r = &wiphy->bands[band]->bitrates[rate->idx];
if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
@@ -86,7 +86,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
else
max_txpwr = rp->ofdm[r->hw_value & 0x7];
} else {
- max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
+ max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
}
}
@@ -112,7 +112,7 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
s8 txpwr_adj;
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
- dev->mt76.rate_power.ofdm[4]);
+ dev->rate_power.ofdm[4]);
mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index c57e05a5c65e..d5809408d1d3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -280,7 +280,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
-void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76x02_rate_power *t,
struct ieee80211_channel *chan)
{
bool is_5ghz;
@@ -324,22 +324,10 @@ void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
- t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
- t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
-
- val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
- t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
- t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
-
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
if (!is_5ghz)
val >>= 8;
- t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
-
- memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8);
- t->stbc[8] = t->vht[8];
- t->stbc[9] = t->vht[9];
+ t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val >> 8);
}
EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
index 3755632e6494..43430ef98b11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -40,7 +40,7 @@ struct mt76x2_temp_comp {
unsigned int low_slope; /* J / dB */
};
-void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76x02_rate_power *t,
struct ieee80211_channel *chan);
void mt76x2_get_power_info(struct mt76x02_dev *dev,
struct mt76x2_tx_power_info *t,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 7b01a06d7f8d..19c139290adb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -182,7 +182,7 @@ void mt76x2_init_txpower(struct mt76x02_dev *dev,
{
struct ieee80211_channel *chan;
struct mt76x2_tx_power_info txp;
- struct mt76_rate_power t = {};
+ struct mt76x02_rate_power t = {};
int i;
for (i = 0; i < sband->n_channels; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index ed2dcb05d614..f84517d932dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -116,7 +116,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
static int
-mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+mt76x2_get_min_rate_power(struct mt76x02_rate_power *r)
{
int i;
s8 ret = 0;
@@ -140,7 +140,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
int txp_0, txp_1, delta = 0;
- struct mt76_rate_power t = {};
+ struct mt76x02_rate_power t = {};
int base_power, gain;
mt76x2_get_power_info(dev, &txp, chan);
@@ -175,7 +175,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
dev->target_power = txp.target_power;
dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
- dev->mt76.rate_power = t;
+ dev->rate_power = t;
mt76x02_phy_set_txpower(dev, txp_0, txp_1);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
index f21282cea845..d710726d47bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -2,6 +2,7 @@
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
select MT76_CONNAC_LIB
+ select WANT_DEV_COREDUMP
depends on MAC80211
depends on PCI
select RELAY
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
index b794ceb79c37..797ae49805c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -6,4 +6,5 @@ mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
debugfs.o mmio.o
mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
-mt7915e-$(CONFIG_MT7986_WMAC) += soc.o
\ No newline at end of file
+mt7915e-$(CONFIG_MT7986_WMAC) += soc.o
+mt7915e-$(CONFIG_DEV_COREDUMP) += coredump.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c
new file mode 100644
index 000000000000..d097a56dd33d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include "coredump.h"
+
+static bool coredump_memdump;
+module_param(coredump_memdump, bool, 0644);
+MODULE_PARM_DESC(coredump_memdump, "Optional ability to dump firmware memory");
+
+static const struct mt7915_mem_region mt7915_mem_regions[] = {
+ {
+ .start = 0xe003b400,
+ .len = 0x00003bff,
+ .name = "CRAM",
+ },
+};
+
+static const struct mt7915_mem_region mt7916_mem_regions[] = {
+ {
+ .start = 0x00800000,
+ .len = 0x0005ffff,
+ .name = "ROM",
+ },
+ {
+ .start = 0x00900000,
+ .len = 0x00013fff,
+ .name = "ULM1",
+ },
+ {
+ .start = 0x02200000,
+ .len = 0x0004ffff,
+ .name = "ULM2",
+ },
+ {
+ .start = 0x02300000,
+ .len = 0x0004ffff,
+ .name = "ULM3",
+ },
+ {
+ .start = 0x00400000,
+ .len = 0x00027fff,
+ .name = "SRAM",
+ },
+ {
+ .start = 0xe0000000,
+ .len = 0x00157fff,
+ .name = "CRAM",
+ },
+};
+
+static const struct mt7915_mem_region mt7986_mem_regions[] = {
+ {
+ .start = 0x00800000,
+ .len = 0x0005ffff,
+ .name = "ROM",
+ },
+ {
+ .start = 0x00900000,
+ .len = 0x0000ffff,
+ .name = "ULM1",
+ },
+ {
+ .start = 0x02200000,
+ .len = 0x0004ffff,
+ .name = "ULM2",
+ },
+ {
+ .start = 0x02300000,
+ .len = 0x0004ffff,
+ .name = "ULM3",
+ },
+ {
+ .start = 0x00400000,
+ .len = 0x00017fff,
+ .name = "SRAM",
+ },
+ {
+ .start = 0xe0000000,
+ .len = 0x00113fff,
+ .name = "CRAM",
+ },
+};
+
+const struct mt7915_mem_region*
+mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ *num = ARRAY_SIZE(mt7915_mem_regions);
+ return &mt7915_mem_regions[0];
+ case 0x7986:
+ *num = ARRAY_SIZE(mt7986_mem_regions);
+ return &mt7986_mem_regions[0];
+ case 0x7916:
+ *num = ARRAY_SIZE(mt7916_mem_regions);
+ return &mt7916_mem_regions[0];
+ default:
+ return NULL;
+ }
+}
+
+static int mt7915_coredump_get_mem_size(struct mt7915_dev *dev)
+{
+ const struct mt7915_mem_region *mem_region;
+ size_t size = 0;
+ u32 num;
+ int i;
+
+ mem_region = mt7915_coredump_get_mem_layout(dev, &num);
+ if (!mem_region)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ size += mem_region->len;
+ mem_region++;
+ }
+
+ /* reserve space for the headers */
+ size += num * sizeof(struct mt7915_mem_hdr);
+ /* make sure it is aligned to 4 bytes for debug message printout */
+ size = ALIGN(size, 4);
+
+ return size;
+}
+
+struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev)
+{
+ struct mt7915_crash_data *crash_data = dev->coredump.crash_data;
+
+ lockdep_assert_held(&dev->dump_mutex);
+
+ guid_gen(&crash_data->guid);
+ ktime_get_real_ts64(&crash_data->timestamp);
+
+ return crash_data;
+}
+
+static void
+mt7915_coredump_fw_state(struct mt7915_dev *dev, struct mt7915_coredump *dump,
+ bool *exception)
+{
+ u32 state, count, type;
+
+ type = (u32)mt76_get_field(dev, MT_FW_EXCEPT_TYPE, GENMASK(7, 0));
+ state = (u32)mt76_get_field(dev, MT_FW_ASSERT_STAT, GENMASK(7, 0));
+ count = is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, MT_FW_EXCEPT_COUNT, GENMASK(15, 8)) :
+ (u32)mt76_get_field(dev, MT_FW_EXCEPT_COUNT, GENMASK(7, 0));
+
+ /* normal mode: driver can manually trigger assert for detailed info */
+ if (!count)
+ strscpy(dump->fw_state, "normal", sizeof(dump->fw_state));
+ else if (state > 1 && (count == 1) && type == 5)
+ strscpy(dump->fw_state, "assert", sizeof(dump->fw_state));
+ else if ((state > 1 && count == 1) || count > 1)
+ strscpy(dump->fw_state, "exception", sizeof(dump->fw_state));
+
+ *exception = !!count;
+}
+
+static void
+mt7915_coredump_fw_trace(struct mt7915_dev *dev, struct mt7915_coredump *dump,
+ bool exception)
+{
+ u32 n, irq, sch, base = MT_FW_EINT_INFO;
+
+ /* trap or run? */
+ dump->last_msg_id = mt76_rr(dev, MT_FW_LAST_MSG_ID);
+
+ n = is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, base, GENMASK(7, 0)) :
+ (u32)mt76_get_field(dev, base, GENMASK(15, 8));
+ dump->eint_info_idx = n;
+
+ irq = mt76_rr(dev, base + 0x8);
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(7, 0), irq) : FIELD_GET(GENMASK(23, 16), irq);
+ dump->irq_info_idx = n;
+
+ sch = mt76_rr(dev, MT_FW_SCHED_INFO);
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(7, 0), sch) : FIELD_GET(GENMASK(15, 8), sch);
+ dump->sched_info_idx = n;
+
+ if (exception) {
+ u32 i, y;
+
+ /* sched trace */
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(15, 8), sch) : FIELD_GET(GENMASK(7, 0), sch);
+ n = n > 60 ? 60 : n;
+
+ strscpy(dump->trace_sched, "(sched_info) id, time",
+ sizeof(dump->trace_sched));
+
+ for (y = dump->sched_info_idx, i = 0; i < n; i++, y++) {
+ mt7915_memcpy_fromio(dev, dump->sched, base + 0xc + y * 12,
+ sizeof(dump->sched));
+ y = y >= n ? 0 : y;
+ }
+
+ /* irq trace */
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(15, 8), irq) : FIELD_GET(GENMASK(7, 0), irq);
+ n = n > 60 ? 60 : n;
+
+ strscpy(dump->trace_irq, "(irq_info) id, time",
+ sizeof(dump->trace_irq));
+
+ for (y = dump->irq_info_idx, i = 0; i < n; i++, y++) {
+ mt7915_memcpy_fromio(dev, dump->irq, base + 0x4 + y * 16,
+ sizeof(dump->irq));
+ y = y >= n ? 0 : y;
+ }
+ }
+}
+
+static void
+mt7915_coredump_fw_stack(struct mt7915_dev *dev, struct mt7915_coredump *dump,
+ bool exception)
+{
+ u32 oldest, i, idx;
+
+ /* stop call stack record */
+ if (!exception)
+ mt76_clear(dev, 0x89050200, BIT(0));
+
+ oldest = (u32)mt76_get_field(dev, 0x89050200, GENMASK(20, 16)) + 2;
+ for (i = 0; i < 16; i++) {
+ idx = ((oldest + 2 * i + 1) % 32);
+ dump->call_stack[i] = mt76_rr(dev, 0x89050204 + idx * 4);
+ }
+
+ /* start call stack record */
+ if (!exception)
+ mt76_set(dev, 0x89050200, BIT(0));
+}
+
+static void
+mt7915_coredump_fw_task(struct mt7915_dev *dev, struct mt7915_coredump *dump)
+{
+ u32 offs = is_mt7915(&dev->mt76) ? 0xe0 : 0x170;
+
+ strscpy(dump->task_qid, "(task queue id) read, write",
+ sizeof(dump->task_qid));
+
+ dump->taskq[0].read = mt76_rr(dev, MT_FW_TASK_QID1);
+ dump->taskq[0].write = mt76_rr(dev, MT_FW_TASK_QID1 - 4);
+ dump->taskq[1].read = mt76_rr(dev, MT_FW_TASK_QID2);
+ dump->taskq[1].write = mt76_rr(dev, MT_FW_TASK_QID2 - 4);
+
+ strscpy(dump->task_info, "(task stack) start, end, size",
+ sizeof(dump->task_info));
+
+ dump->taski[0].start = mt76_rr(dev, MT_FW_TASK_START);
+ dump->taski[0].end = mt76_rr(dev, MT_FW_TASK_END);
+ dump->taski[0].size = mt76_rr(dev, MT_FW_TASK_SIZE);
+ dump->taski[1].start = mt76_rr(dev, MT_FW_TASK_START + offs);
+ dump->taski[1].end = mt76_rr(dev, MT_FW_TASK_END + offs);
+ dump->taski[1].size = mt76_rr(dev, MT_FW_TASK_SIZE + offs);
+}
+
+static void
+mt7915_coredump_fw_context(struct mt7915_dev *dev, struct mt7915_coredump *dump)
+{
+ u32 count, idx, id;
+
+ count = mt76_rr(dev, MT_FW_CIRQ_COUNT);
+
+ /* current context */
+ if (!count) {
+ strscpy(dump->fw_context, "(context) interrupt",
+ sizeof(dump->fw_context));
+
+ idx = is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, MT_FW_CIRQ_IDX, GENMASK(31, 16)) :
+ (u32)mt76_get_field(dev, MT_FW_CIRQ_IDX, GENMASK(15, 0));
+ dump->context.idx = idx;
+ dump->context.handler = mt76_rr(dev, MT_FW_CIRQ_LISR);
+ } else {
+ idx = mt76_rr(dev, MT_FW_TASK_IDX);
+ id = mt76_rr(dev, MT_FW_TASK_ID);
+
+ if (!id && idx == 3) {
+ strscpy(dump->fw_context, "(context) idle",
+ sizeof(dump->fw_context));
+ } else if (id && idx != 3) {
+ strscpy(dump->fw_context, "(context) task",
+ sizeof(dump->fw_context));
+
+ dump->context.idx = idx;
+ dump->context.handler = id;
+ }
+ }
+}
+
+static struct mt7915_coredump *mt7915_coredump_build(struct mt7915_dev *dev)
+{
+ struct mt7915_crash_data *crash_data = dev->coredump.crash_data;
+ struct mt7915_coredump *dump;
+ struct mt7915_coredump_mem *dump_mem;
+ size_t len, sofar = 0, hdr_len = sizeof(*dump);
+ unsigned char *buf;
+ bool exception;
+
+ len = hdr_len;
+
+ if (coredump_memdump && crash_data->memdump_buf_len)
+ len += sizeof(*dump_mem) + crash_data->memdump_buf_len;
+
+ sofar += hdr_len;
+
+ /* this is going to get big when we start dumping memory and such,
+ * so go ahead and use vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ mutex_lock(&dev->dump_mutex);
+
+ dump = (struct mt7915_coredump *)(buf);
+ dump->len = len;
+
+ /* plain text */
+ strscpy(dump->magic, "mt76-crash-dump", sizeof(dump->magic));
+ strscpy(dump->kernel, init_utsname()->release, sizeof(dump->kernel));
+ strscpy(dump->fw_ver, dev->mt76.hw->wiphy->fw_version,
+ sizeof(dump->fw_ver));
+
+ guid_copy(&dump->guid, &crash_data->guid);
+ dump->tv_sec = crash_data->timestamp.tv_sec;
+ dump->tv_nsec = crash_data->timestamp.tv_nsec;
+ dump->device_id = mt76_chip(&dev->mt76);
+
+ mt7915_coredump_fw_state(dev, dump, &exception);
+ mt7915_coredump_fw_trace(dev, dump, exception);
+ mt7915_coredump_fw_task(dev, dump);
+ mt7915_coredump_fw_context(dev, dump);
+ mt7915_coredump_fw_stack(dev, dump, exception);
+
+ /* gather memory content */
+ dump_mem = (struct mt7915_coredump_mem *)(buf + sofar);
+ dump_mem->len = crash_data->memdump_buf_len;
+ if (coredump_memdump && crash_data->memdump_buf_len)
+ memcpy(dump_mem->data, crash_data->memdump_buf,
+ crash_data->memdump_buf_len);
+
+ mutex_unlock(&dev->dump_mutex);
+
+ return dump;
+}
+
+int mt7915_coredump_submit(struct mt7915_dev *dev)
+{
+ struct mt7915_coredump *dump;
+
+ dump = mt7915_coredump_build(dev);
+ if (!dump) {
+ dev_warn(dev->mt76.dev, "no crash dump data found\n");
+ return -ENODATA;
+ }
+
+ dev_coredumpv(dev->mt76.dev, dump, dump->len, GFP_KERNEL);
+
+ return 0;
+}
+
+int mt7915_coredump_register(struct mt7915_dev *dev)
+{
+ struct mt7915_crash_data *crash_data;
+
+ crash_data = vzalloc(sizeof(*dev->coredump.crash_data));
+ if (!crash_data)
+ return -ENOMEM;
+
+ dev->coredump.crash_data = crash_data;
+
+ if (coredump_memdump) {
+ crash_data->memdump_buf_len = mt7915_coredump_get_mem_size(dev);
+ if (!crash_data->memdump_buf_len)
+ /* no memory content */
+ return 0;
+
+ crash_data->memdump_buf = vzalloc(crash_data->memdump_buf_len);
+ if (!crash_data->memdump_buf) {
+ vfree(crash_data);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void mt7915_coredump_unregister(struct mt7915_dev *dev)
+{
+ if (dev->coredump.crash_data->memdump_buf) {
+ vfree(dev->coredump.crash_data->memdump_buf);
+ dev->coredump.crash_data->memdump_buf = NULL;
+ dev->coredump.crash_data->memdump_buf_len = 0;
+ }
+
+ vfree(dev->coredump.crash_data);
+ dev->coredump.crash_data = NULL;
+}
+
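
For reference, a standalone sketch (user-space C, not driver code; the two-entry region table below is hypothetical) of the sizing rule used by mt7915_coredump_get_mem_size() above: sum the region lengths, add one header per region, and round the total up to 4 bytes.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* round up to a 4-byte boundary, as the driver does with ALIGN(size, 4) */
#define ALIGN4(x) (((x) + 3) & ~(size_t)3)

struct mem_region {
	uint32_t start;
	size_t len;
	const char *name;
};

struct mem_hdr {			/* start + len, matching the on-dump header */
	uint32_t start;
	uint32_t len;
};

static size_t dump_mem_size(const struct mem_region *r, unsigned int num)
{
	size_t size = 0;
	unsigned int i;

	for (i = 0; i < num; i++)
		size += r[i].len;		/* raw memory content */

	size += num * sizeof(struct mem_hdr);	/* one header per region */
	return ALIGN4(size);			/* keep printout 4-byte aligned */
}

int main(void)
{
	/* hypothetical layout, loosely mirroring the mt7916 table shape */
	static const struct mem_region regions[] = {
		{ 0x00800000, 0x0005ffff, "ROM" },
		{ 0xe0000000, 0x00157fff, "CRAM" },
	};

	printf("memdump buffer: %zu bytes\n", dump_mem_size(regions, 2));
	return 0;
}
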
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/coredump.h b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.h
new file mode 100644
index 000000000000..709f8e9c795c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "mt7915.h"
+
+struct trace {
+ u32 id;
+ u32 timestamp;
+};
+
+struct mt7915_coredump {
+ char magic[16];
+
+ u32 len;
+
+ guid_t guid;
+
+ /* time-of-day stamp */
+ u64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ u64 tv_nsec;
+ /* kernel version */
+ char kernel[64];
+ /* firmware version */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ u32 device_id;
+
+ /* exception state */
+ char fw_state[12];
+
+ u32 last_msg_id;
+ u32 eint_info_idx;
+ u32 irq_info_idx;
+ u32 sched_info_idx;
+
+ /* schedule info */
+ char trace_sched[32];
+ struct {
+ struct trace t;
+ u32 pc;
+ } sched[60];
+
+ /* irq info */
+ char trace_irq[32];
+ struct trace irq[60];
+
+ /* task queue status */
+ char task_qid[32];
+ struct {
+ u32 read;
+ u32 write;
+ } taskq[2];
+
+ /* task stack info */
+ char task_info[32];
+ struct {
+ u32 start;
+ u32 end;
+ u32 size;
+ } taski[2];
+
+ /* firmware context */
+ char fw_context[24];
+ struct {
+ u32 idx;
+ u32 handler;
+ } context;
+
+ /* link registers calltrace */
+ u32 call_stack[16];
+
+ /* memory content */
+ u8 data[];
+} __packed;
+
+struct mt7915_coredump_mem {
+ u32 len;
+ u8 data[];
+} __packed;
+
+struct mt7915_mem_hdr {
+ u32 start;
+ u32 len;
+ u8 data[];
+};
+
+struct mt7915_mem_region {
+ u32 start;
+ size_t len;
+
+ const char *name;
+};
+
+#ifdef CONFIG_DEV_COREDUMP
+
+const struct mt7915_mem_region *
+mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num);
+struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev);
+int mt7915_coredump_submit(struct mt7915_dev *dev);
+int mt7915_coredump_register(struct mt7915_dev *dev);
+void mt7915_coredump_unregister(struct mt7915_dev *dev);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline const struct mt7915_mem_region *
+mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num)
+{
+ return NULL;
+}
+
+static inline int mt7915_coredump_submit(struct mt7915_dev *dev)
+{
+ return 0;
+}
+
+static inline struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev)
+{
+ return NULL;
+}
+
+static inline int mt7915_coredump_register(struct mt7915_dev *dev)
+{
+ return 0;
+}
+
+static inline void mt7915_coredump_unregister(struct mt7915_dev *dev)
+{
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
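
The #ifdef block above follows the usual kernel stub pattern; a generic sketch (illustrative names only, not part of this driver) of how the static inline fallbacks keep call sites free of preprocessor checks:

struct foo_dev;				/* opaque device type, illustrative only */

#ifdef CONFIG_FOO_FEATURE
int foo_feature_submit(struct foo_dev *dev);
#else
static inline int foo_feature_submit(struct foo_dev *dev)
{
	return 0;			/* feature compiled out: silently succeed */
}
#endif

Callers invoke foo_feature_submit() unconditionally, just as mt7915_register_device() calls mt7915_coredump_register() whether or not CONFIG_DEV_COREDUMP is set.
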
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 6ef3431cad64..fb46c2c1784f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -46,12 +46,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get,
/* test knob of system error recovery */
static ssize_t
-mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
+mt7915_sys_recovery_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
+ bool band = phy->mt76->band_idx;
char buf[16];
int ret = 0;
u16 val;
@@ -71,9 +71,19 @@ mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
return -EINVAL;
switch (val) {
+ /*
+ * 0: grab firmware current SER state.
+ * 1: trigger & enable system error L1 recovery.
+ * 2: trigger & enable system error L2 recovery.
+ * 3: trigger & enable system error L3 rx abort.
+ * 4: trigger & enable system error L3 tx abort
+ * 5: trigger & enable system error L3 tx disable.
+ * 6: trigger & enable system error L3 bf recovery.
+ * 7: trigger & enable system error full recovery.
+ * 8: trigger firmware crash.
+ */
case SER_QUERY:
- /* grab firmware SER stats */
- ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy);
+ ret = mt7915_mcu_set_ser(dev, 0, 0, band);
break;
case SER_SET_RECOVER_L1:
case SER_SET_RECOVER_L2:
@@ -81,11 +91,28 @@ mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
case SER_SET_RECOVER_L3_TX_ABORT:
case SER_SET_RECOVER_L3_TX_DISABLE:
case SER_SET_RECOVER_L3_BF:
- ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy);
+ ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), band);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, band);
+ break;
+
+ /* enable full chip reset */
+ case SER_SET_RECOVER_FULL:
+ mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
+ ret = mt7915_mcu_set_ser(dev, 1, 3, band);
if (ret)
return ret;
- ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy);
+ dev->recovery.state |= MT_MCU_CMD_WDT_MASK;
+ mt7915_reset(dev);
+ break;
+
+ /* WARNING: trigger firmware crash */
+ case SER_SET_SYSTEM_ASSERT:
+ mt76_wr(dev, MT_MCU_WM_CIRQ_EINT_MASK_CLR_ADDR, BIT(18));
+ mt76_wr(dev, MT_MCU_WM_CIRQ_EINT_SOFT_ADDR, BIT(18));
break;
default:
break;
@@ -95,20 +122,45 @@ mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
}
static ssize_t
-mt7915_fw_ser_get(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+mt7915_sys_recovery_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
char *buff;
int desc = 0;
ssize_t ret;
- static const size_t bufsz = 400;
+ static const size_t bufsz = 1024;
buff = kmalloc(bufsz, GFP_KERNEL);
if (!buff)
return -ENOMEM;
+ /* HELP */
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "Please echo the correct value ...\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "0: grab firmware transient SER state\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "1: trigger system error L1 recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "2: trigger system error L2 recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "3: trigger system error L3 rx abort\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "4: trigger system error L3 tx abort\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "5: trigger system error L3 tx disable\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "6: trigger system error L3 bf recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "7: trigger system error full recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "8: trigger firmware crash\n");
+
+ /* SER statistics */
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "\nlet's dump firmware SER statistics...\n");
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_STATUS = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_SER_STATS));
@@ -139,15 +191,19 @@ mt7915_fw_ser_get(struct file *file, char __user *user_buf,
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_LMAC_WISR7_B1 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "\nSYS_RESET_COUNT: WM %d, WA %d\n",
+ dev->recovery.wm_reset_count,
+ dev->recovery.wa_reset_count);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
return ret;
}
-static const struct file_operations mt7915_fw_ser_ops = {
- .write = mt7915_fw_ser_set,
- .read = mt7915_fw_ser_get,
+static const struct file_operations mt7915_sys_recovery_ops = {
+ .write = mt7915_sys_recovery_set,
+ .read = mt7915_sys_recovery_get,
.open = simple_open,
.llseek = default_llseek,
};
@@ -598,10 +654,6 @@ mt7915_fw_util_wm_show(struct seq_file *file, void *data)
struct mt7915_dev *dev = file->private;
seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WM_MCU_PC));
- seq_printf(file, "Exception state: 0x%x\n",
- is_mt7915(&dev->mt76) ?
- (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(15, 8)) :
- (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(7, 0)));
if (dev->fw.debug_wm) {
seq_printf(file, "Busy: %u%% Peak busy: %u%%\n",
@@ -639,16 +691,17 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
{
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
- int bound[15], range[4], i, n;
+ int bound[15], range[4], i;
+ u8 band = phy->mt76->band_idx;
/* Tx ampdu stat */
for (i = 0; i < ARRAY_SIZE(range); i++)
- range[i] = mt76_rr(dev, MT_MIB_ARNG(phy->band_idx, i));
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(band, i));
for (i = 0; i < ARRAY_SIZE(bound); i++)
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
- seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, phy->band_idx);
+ seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, band);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@@ -656,9 +709,8 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
bound[i] + 1, bound[i + 1]);
seq_puts(file, "\nCount: ");
- n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
- seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
+ seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
seq_puts(file, "\n");
seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
@@ -906,35 +958,199 @@ mt7915_xmit_queues_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_xmit_queues);
-static int
-mt7915_rate_txpower_show(struct seq_file *file, void *data)
+#define mt7915_txpower_puts(prefix, rate) \
+({ \
+ len += scnprintf(buf + len, sz - len, "%-16s:", #prefix " (tmac)"); \
+ for (i = 0; i < mt7915_sku_group_len[rate]; i++, offs++) \
+ len += scnprintf(buf + len, sz - len, " %6d", txpwr[offs]); \
+ len += scnprintf(buf + len, sz - len, "\n"); \
+})
+
+#define mt7915_txpower_sets(rate, pwr, flag) \
+({ \
+ offs += len; \
+ len = mt7915_sku_group_len[rate]; \
+ if (mode == flag) { \
+ for (i = 0; i < len; i++) \
+ req.txpower_sku[offs + i] = pwr; \
+ } \
+})
+
+static ssize_t
+mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ s8 txpwr[MT7915_SKU_RATE_NUM];
+ static const size_t sz = 2048;
+ u8 band = phy->mt76->band_idx;
+ int i, offs = 0, len = 0;
+ ssize_t ret;
+ char *buf;
+ u32 reg;
+
+ buf = kzalloc(sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr));
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+
+ /* Txpower propagation path: TMAC -> TXV -> BBP */
+ len += scnprintf(buf + len, sz - len,
+ "\nPhy%d Tx power table (channel %d)\n",
+ phy != &dev->phy, phy->mt76->chandef.chan->hw_value);
+ len += scnprintf(buf + len, sz - len, "%-16s %6s %6s %6s %6s\n",
+ " ", "1m", "2m", "5m", "11m");
+ mt7915_txpower_puts(CCK, SKU_CCK);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "6m", "9m", "12m", "18m", "24m", "36m", "48m",
+ "54m");
+ mt7915_txpower_puts(OFDM, SKU_OFDM);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4",
+ "mcs5", "mcs6", "mcs7");
+ mt7915_txpower_puts(HT20, SKU_HT_BW20);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
+ "mcs6", "mcs7", "mcs32");
+ mt7915_txpower_puts(HT40, SKU_HT_BW40);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
+ "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11");
+ mt7915_txpower_puts(VHT20, SKU_VHT_BW20);
+ mt7915_txpower_puts(VHT40, SKU_VHT_BW40);
+ mt7915_txpower_puts(VHT80, SKU_VHT_BW80);
+ mt7915_txpower_puts(VHT160, SKU_VHT_BW160);
+ mt7915_txpower_puts(HE26, SKU_HE_RU26);
+ mt7915_txpower_puts(HE52, SKU_HE_RU52);
+ mt7915_txpower_puts(HE106, SKU_HE_RU106);
+ mt7915_txpower_puts(HE242, SKU_HE_RU242);
+ mt7915_txpower_puts(HE484, SKU_HE_RU484);
+ mt7915_txpower_puts(HE996, SKU_HE_RU996);
+ mt7915_txpower_puts(HE996x2, SKU_HE_RU2x996);
+
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_TPC_CTRL_STAT(band) :
+ MT_WF_PHY_TPC_CTRL_STAT_MT7916(band);
+
+ len += scnprintf(buf + len, sz - len, "\nTx power (bbp) : %6ld\n",
+ mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER));
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- static const char * const sku_group_name[] = {
- "CCK", "OFDM", "HT20", "HT40",
- "VHT20", "VHT40", "VHT80", "VHT160",
- "RU26", "RU52", "RU106", "RU242/SU20",
- "RU484/SU40", "RU996/SU80", "RU2x996/SU160"
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct mt7915_mcu_txpower_sku req = {
+ .format_id = TX_POWER_LIMIT_TABLE,
+ .band_idx = phy->mt76->band_idx,
};
- struct mt7915_phy *phy = file->private;
- s8 txpower[MT7915_SKU_RATE_NUM], *buf;
- int i;
+ char buf[100];
+ int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
+ enum mac80211_rx_encoding mode;
+ u32 offs = 0, len = 0;
- seq_printf(file, "\nBand %d\n", phy != &phy->dev->phy);
- mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
- for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
- u8 mcs_num = mt7915_sku_group_len[i];
+ if (count >= sizeof(buf))
+ return -EINVAL;
- if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160)
- mcs_num = 10;
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
- mt76_seq_puts_array(file, sku_group_name[i], buf, mcs_num);
- buf += mt7915_sku_group_len[i];
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ if (sscanf(buf, "%u %u %u %u %u",
+ &mode, &pwr160, &pwr80, &pwr40, &pwr20) != 5) {
+ dev_warn(dev->mt76.dev,
+ "per bandwidth power limit: Mode BW160 BW80 BW40 BW20");
+ return -EINVAL;
}
- return 0;
+ if (mode > RX_ENC_HE)
+ return -EINVAL;
+
+ if (pwr160)
+ pwr160 = mt7915_get_power_bound(phy, pwr160);
+ if (pwr80)
+ pwr80 = mt7915_get_power_bound(phy, pwr80);
+ if (pwr40)
+ pwr40 = mt7915_get_power_bound(phy, pwr40);
+ if (pwr20)
+ pwr20 = mt7915_get_power_bound(phy, pwr20);
+
+ if (pwr160 < 0 || pwr80 < 0 || pwr40 < 0 || pwr20 < 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = mt7915_mcu_get_txpower_sku(phy, req.txpower_sku,
+ sizeof(req.txpower_sku));
+ if (ret)
+ goto out;
+
+ mt7915_txpower_sets(SKU_CCK, pwr20, RX_ENC_LEGACY);
+ mt7915_txpower_sets(SKU_OFDM, pwr20, RX_ENC_LEGACY);
+ if (mode == RX_ENC_LEGACY)
+ goto skip;
+
+ mt7915_txpower_sets(SKU_HT_BW20, pwr20, RX_ENC_HT);
+ mt7915_txpower_sets(SKU_HT_BW40, pwr40, RX_ENC_HT);
+ if (mode == RX_ENC_HT)
+ goto skip;
+
+ mt7915_txpower_sets(SKU_VHT_BW20, pwr20, RX_ENC_VHT);
+ mt7915_txpower_sets(SKU_VHT_BW40, pwr40, RX_ENC_VHT);
+ mt7915_txpower_sets(SKU_VHT_BW80, pwr80, RX_ENC_VHT);
+ mt7915_txpower_sets(SKU_VHT_BW160, pwr160, RX_ENC_VHT);
+ if (mode == RX_ENC_VHT)
+ goto skip;
+
+ mt7915_txpower_sets(SKU_HE_RU26, pwr20, RX_ENC_HE + 1);
+ mt7915_txpower_sets(SKU_HE_RU52, pwr20, RX_ENC_HE + 1);
+ mt7915_txpower_sets(SKU_HE_RU106, pwr20, RX_ENC_HE + 1);
+ mt7915_txpower_sets(SKU_HE_RU242, pwr20, RX_ENC_HE);
+ mt7915_txpower_sets(SKU_HE_RU484, pwr40, RX_ENC_HE);
+ mt7915_txpower_sets(SKU_HE_RU996, pwr80, RX_ENC_HE);
+ mt7915_txpower_sets(SKU_HE_RU2x996, pwr160, RX_ENC_HE);
+skip:
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+ &req, sizeof(req), true);
+ if (ret)
+ goto out;
+
+ mphy->txpower_cur = max(mphy->txpower_cur,
+ max(pwr160, max(pwr80, max(pwr40, pwr20))));
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret ? ret : count;
}
-DEFINE_SHOW_ATTRIBUTE(mt7915_rate_txpower);
+static const struct file_operations mt7915_rate_txpower_fops = {
+ .write = mt7915_rate_txpower_set,
+ .read = mt7915_rate_txpower_get,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
static int
mt7915_twt_stats(struct seq_file *s, void *data)
@@ -963,7 +1179,7 @@ mt7915_twt_stats(struct seq_file *s, void *data)
}
/* The index of RF registers use the generic regidx, combined with two parts:
- * WF selection [31:28] and offset [27:0].
+ * WF selection [31:24] and offset [23:0].
*/
static int
mt7915_rf_regval_get(void *data, u64 *val)
@@ -1010,7 +1226,8 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("xmit-queues", 0400, dir, phy,
&mt7915_xmit_queues_fops);
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
- debugfs_create_file("fw_ser", 0600, dir, phy, &mt7915_fw_ser_ops);
+ debugfs_create_file("sys_recovery", 0600, dir, phy,
+ &mt7915_sys_recovery_ops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
@@ -1026,7 +1243,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
mt7915_twt_stats);
debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
- if (!dev->dbdc_support || phy->band_idx) {
+ if (!dev->dbdc_support || phy->mt76->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
debugfs_create_file("radar_trigger", 0200, dir, dev,
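
The sys_recovery and rate_txpower knobs above follow the common debugfs read/write pattern: a write handler parses user input, a read handler builds a text buffer and copies it out with simple_read_from_buffer(). A minimal self-contained sketch of that pattern (demo_* names are placeholders, not part of the driver):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static u32 demo_val;

static ssize_t
demo_set(struct file *file, const char __user *user_buf,
	 size_t count, loff_t *ppos)
{
	char buf[16];

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	buf[count] = '\0';
	if (kstrtou32(buf, 0, &demo_val))
		return -EINVAL;

	return count;
}

static ssize_t
demo_get(struct file *file, char __user *user_buf,
	 size_t count, loff_t *ppos)
{
	char buf[64];
	int len;

	len = scnprintf(buf, sizeof(buf), "current value: %u\n", demo_val);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const struct file_operations demo_ops = {
	.write = demo_set,
	.read = demo_get,
	.open = simple_open,
	.llseek = default_llseek,
};

/* registration, e.g. from an init path:
 * debugfs_create_file("demo", 0600, dir, NULL, &demo_ops);
 */
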
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 00aafc2422f3..e3fa064918bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -11,7 +11,11 @@ mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base
struct mt7915_dev *dev = phy->dev;
if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
- ring_base = MT_WED_TX_RING_BASE;
+ if (is_mt7986(&dev->mt76))
+ ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
+ else
+ ring_base = MT_WED_TX_RING_BASE;
+
idx -= MT_TXQ_ID(0);
}
@@ -46,29 +50,65 @@ static void mt7915_dma_config(struct mt7915_dev *dev)
#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
if (is_mt7915(&dev->mt76)) {
- RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
- RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
- RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
- RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
- RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
- RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
+ MT7915_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
+ MT7915_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
+ MT7915_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
+ MT7915_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
+ MT7915_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
+ MT7915_RXQ_MCU_WA);
TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
- MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
- MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
- MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
+ MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
+ MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
+ MT7915_TXQ_FWDL);
} else {
- RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
- RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
- RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
- RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
- RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
- RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
- TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
- MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
- MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
- MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
+ MT7916_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
+ MT7916_RXQ_MCU_WA_EXT);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
+ MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
+ MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
+ MT7915_TXQ_FWDL);
+
+ if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
+ MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
+ MT7916_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_WED_RX_DONE_BAND1_MT7916,
+ MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
+ MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
+ MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
+ MT7915_TXQ_BAND1);
+ } else {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
+ MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
+ MT7916_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
+ MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
+ MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7915_TXQ_BAND1);
+ }
}
}
@@ -313,17 +353,26 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
MT_INT_TX_DONE_MCU |
MT_INT_MCU_CMD;
- if (!dev->phy.band_idx)
+ if (!dev->phy.mt76->band_idx)
irq_mask |= MT_INT_BAND0_RX_DONE;
- if (dev->dbdc_support || dev->phy.band_idx)
+ if (dev->dbdc_support || dev->phy.mt76->band_idx)
irq_mask |= MT_INT_BAND1_RX_DONE;
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
u32 wed_irq_mask = irq_mask;
+ int ret;
wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
- mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
+ if (!is_mt7986(&dev->mt76))
+ mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
+ else
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+ ret = mt7915_mcu_wed_enable_rx_stats(dev);
+ if (ret)
+ return ret;
+
mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
}
@@ -348,20 +397,28 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
mt7915_dma_disable(dev, true);
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
-
- mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ if (!is_mt7986(mdev)) {
+ u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;
+
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_WED);
+ mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
+ wed_control_rx1));
+ if (is_mt7915(mdev))
+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
+ MT_WFDMA0_EXT0_RXWB_KEEP);
+ }
} else {
mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
}
/* init tx queue */
ret = mt7915_init_tx_queues(&dev->phy,
- MT_TXQ_ID(dev->phy.band_idx),
+ MT_TXQ_ID(dev->phy.mt76->band_idx),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(0));
if (ret)
@@ -369,7 +426,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (phy2) {
ret = mt7915_init_tx_queues(phy2,
- MT_TXQ_ID(phy2->band_idx),
+ MT_TXQ_ID(phy2->mt76->band_idx),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(1));
if (ret)
@@ -410,7 +467,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return ret;
/* event from WA */
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA;
dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
@@ -425,7 +482,14 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return ret;
/* rx data queue for band0 */
- if (!dev->phy.band_idx) {
+ if (!dev->phy.mt76->band_idx) {
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ dev->mt76.q_rx[MT_RXQ_MAIN].flags =
+ MT_WED_Q_RX(MT7915_RXQ_BAND0);
+ dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT_RXQ_ID(MT_RXQ_MAIN),
MT7915_RX_RING_SIZE,
@@ -437,16 +501,32 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
/* tx free notify event from WA for band0 */
if (!is_mt7915(mdev)) {
+ wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
+ wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);
+
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+ if (is_mt7916(mdev)) {
+ wa_rx_base = MT_WED_RX_RING_BASE;
+ wa_rx_idx = MT7915_RXQ_MCU_WA;
+ }
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
- MT_RXQ_ID(MT_RXQ_MAIN_WA),
- MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE, wa_rx_base);
if (ret)
return ret;
}
- if (dev->dbdc_support || dev->phy.band_idx) {
+ if (dev->dbdc_support || dev->phy.mt76->band_idx) {
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags =
+ MT_WED_Q_RX(MT7915_RXQ_BAND1);
+ dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ }
+
/* rx data queue for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
MT_RXQ_ID(MT_RXQ_BAND1),
@@ -479,6 +559,53 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return 0;
}
+int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
+{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+ int i;
+
+ /* clean up hw queues */
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ if (mphy_ext)
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
+
+ /* reset wfsys */
+ if (force)
+ mt7915_wfsys_reset(dev);
+
+ mt7915_dma_disable(dev, force);
+
+ /* reset hw queues */
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ if (mphy_ext)
+ mt76_queue_reset(dev, mphy_ext->q_tx[i]);
+ }
+
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+
+ mt76_tx_status_check(&dev->mt76, true);
+
+ mt7915_dma_enable(dev);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_reset(dev, i);
+
+ return 0;
+}
+
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
mt7915_dma_disable(dev, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 4b1a9811646f..59069fb86414 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -131,9 +131,10 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
u8 *eeprom = dev->mt76.eeprom.data;
+ u8 band = phy->mt76->band_idx;
u32 val;
- val = eeprom[MT_EE_WIFI_CONF + phy->band_idx];
+ val = eeprom[MT_EE_WIFI_CONF + band];
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
if (!is_mt7915(&dev->mt76)) {
@@ -153,7 +154,7 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
return;
}
} else if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support) {
- val = phy->band_idx ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+ val = band ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
}
switch (val) {
@@ -173,60 +174,51 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
struct mt7915_phy *phy)
{
- u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
+ u8 path, nss, nss_max = 4, *eeprom = dev->mt76.eeprom.data;
struct mt76_phy *mphy = phy->mt76;
- bool ext_phy = phy != &dev->phy;
+ u8 band = phy->mt76->band_idx;
mt7915_eeprom_parse_band_config(phy);
- /* read tx/rx mask from eeprom */
+ /* read tx/rx path from eeprom */
if (is_mt7915(&dev->mt76)) {
- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
- eeprom[MT_EE_WIFI_CONF]);
+ path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF]);
} else {
- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
- eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
+ path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF + band]);
}
- if (!nss || nss > 4)
- nss = 4;
+ if (!path || path > 4)
+ path = 4;
/* read tx/rx stream */
- nss_band = nss;
-
+ nss = path;
if (dev->dbdc_support) {
if (is_mt7915(&dev->mt76)) {
- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
- eeprom[MT_EE_WIFI_CONF + 3]);
- if (phy->band_idx)
- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
- eeprom[MT_EE_WIFI_CONF + 3]);
+ path = min_t(u8, path, 2);
+ nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (band)
+ nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
} else {
- nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
- eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
+ nss = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+ eeprom[MT_EE_WIFI_CONF + 2 + band]);
}
- nss_band_max = is_mt7986(&dev->mt76) ?
- MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
- } else {
- nss_band_max = is_mt7986(&dev->mt76) ?
- MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
+ if (!is_mt7986(&dev->mt76))
+ nss_max = 2;
}
- if (!nss_band || nss_band > nss_band_max)
- nss_band = nss_band_max;
-
- if (nss_band > nss) {
- dev_warn(dev->mt76.dev,
- "nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
- nss, nss_band, phy->band_idx, ext_phy);
- nss = nss_band;
- }
+ if (!nss)
+ nss = nss_max;
+ nss = min_t(u8, min_t(u8, nss_max, nss), path);
- mphy->chainmask = BIT(nss) - 1;
- if (ext_phy)
+ mphy->chainmask = BIT(path) - 1;
+ if (band)
mphy->chainmask <<= dev->chainshift;
- mphy->antenna_mask = BIT(nss_band) - 1;
+ mphy->antenna_mask = BIT(nss) - 1;
dev->chainmask |= mphy->chainmask;
dev->chainshift = hweight8(dev->mphy.chainmask);
}
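
A worked example (standalone sketch with hypothetical values, not the driver function) of the reworked path/stream accounting above: the chainmask tracks tx/rx paths, the antenna_mask tracks spatial streams, and band1 shifts its chains past band0's.

#include <stdio.h>

#define BIT(n) (1U << (n))

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int path = 2, nss = 2, nss_max = 2;	/* e.g. an mt7915 dbdc band */
	unsigned int band = 1, chainshift = 2;		/* band1 sits behind band0's 2 chains */
	unsigned int chainmask, antenna_mask;

	/* streams can never exceed the chip maximum or the number of paths */
	nss = min_u(min_u(nss_max, nss), path);

	chainmask = BIT(path) - 1;
	if (band)
		chainmask <<= chainshift;
	antenna_mask = BIT(nss) - 1;

	printf("chainmask=0x%x antenna_mask=0x%x\n", chainmask, antenna_mask);
	/* prints chainmask=0xc antenna_mask=0x3 */
	return 0;
}
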
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 7578ac6d0be6..f3e56817d36e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -58,11 +58,6 @@ enum mt7915_eeprom_field {
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
-#define MT_EE_NSS_MAX_MA7915 4
-#define MT_EE_NSS_MAX_DBDC_MA7915 2
-#define MT_EE_NSS_MAX_MA7986 4
-#define MT_EE_NSS_MAX_DBDC_MA7986 4
-
enum mt7915_adie_sku {
MT7976_ONE_ADIE_DBDC = 0x7,
MT7975_ONE_ADIE = 0x8,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index cc2aac86bcfb..c810c31fbd6e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -8,6 +8,7 @@
#include "mt7915.h"
#include "mac.h"
#include "mcu.h"
+#include "coredump.h"
#include "eeprom.h"
static const struct ieee80211_iface_limit if_limits[] = {
@@ -262,9 +263,8 @@ static void mt7915_led_set_brightness(struct led_classdev *led_cdev,
mt7915_led_set_config(led_cdev, 0xff, 0);
}
-static void
-mt7915_init_txpower(struct mt7915_dev *dev,
- struct ieee80211_supported_band *sband)
+void mt7915_init_txpower(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask);
int nss_delta = mt76_tx_power_nss_delta(n_chains);
@@ -353,6 +353,10 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+
+ if (!is_mt7915(&dev->mt76))
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
if (!mdev->dev->of_node ||
!of_property_read_bool(mdev->dev->of_node,
@@ -444,9 +448,32 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
/* mt7915: disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
+
+ /* clear estimated value of EIFS for Rx duration & OBSS time */
+ mt76_wr(dev, MT_WF_RMAC_RSVD0(band), MT_WF_RMAC_RSVD0_EIFS_CLR);
+
+ /* clear backoff time for Rx duration */
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME1(band),
+ MT_WF_RMAC_MIB_NONQOSD_BACKOFF);
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME3(band),
+ MT_WF_RMAC_MIB_QOS01_BACKOFF);
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
+ MT_WF_RMAC_MIB_QOS23_BACKOFF);
+
+ /* clear backoff time and set software compensation for OBSS time */
+ mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
+ set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
+ FIELD_PREP(MT_WF_RMAC_MIB_ED_OFFSET, 4);
+ mt76_rmw(dev, MT_WF_RMAC_MIB_AIRTIME0(band), mask, set);
+
+ /* filter out non-resp frames and get instantaneous signal reporting */
+ mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM;
+ set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
+ FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
+ mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
-static void mt7915_mac_init(struct mt7915_dev *dev)
+void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
@@ -476,7 +503,7 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
}
}
-static int mt7915_txbf_init(struct mt7915_dev *dev)
+int mt7915_txbf_init(struct mt7915_dev *dev)
{
int ret;
@@ -513,7 +540,7 @@ mt7915_alloc_ext_phy(struct mt7915_dev *dev)
phy->mt76 = mphy;
/* Bind main phy to band0 and ext_phy to band1 for dbdc case */
- phy->band_idx = 1;
+ phy->mt76->band_idx = 1;
return phy;
}
@@ -633,7 +660,7 @@ static bool mt7915_band_config(struct mt7915_dev *dev)
{
bool ret = true;
- dev->phy.band_idx = 0;
+ dev->phy.mt76->band_idx = 0;
if (is_mt7986(&dev->mt76)) {
u32 sku = mt7915_check_adie(dev, true);
@@ -644,7 +671,7 @@ static bool mt7915_band_config(struct mt7915_dev *dev)
* dbdc is disabled.
*/
if (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE) {
- dev->phy.band_idx = 1;
+ dev->phy.mt76->band_idx = 1;
ret = false;
}
} else {
@@ -700,45 +727,49 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
{
- int nss;
+ int sts;
u32 *cap;
if (!phy->mt76->cap.has_5ghz)
return;
- nss = hweight8(phy->mt76->chainmask);
+ sts = hweight8(phy->mt76->chainmask);
cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
- (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+ FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ sts - 1);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
- if (nss < 2)
+ if (sts < 2)
return;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE |
FIELD_PREP(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
- nss - 1);
+ sts - 1);
}
static void
-mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
- struct ieee80211_sta_he_cap *he_cap,
- int vif, int nss)
+mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
+ struct ieee80211_sta_he_cap *he_cap, int vif)
{
+ struct mt7915_dev *dev = phy->dev;
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
- u8 c, nss_160;
+ int sts = hweight8(phy->mt76->chainmask);
+ u8 c, sts_160 = sts;
- /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
- if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
- nss_160 = nss / 2;
- else
- nss_160 = nss;
+ /* Can do 1/2 of STS in 160Mhz mode for mt7915 */
+ if (is_mt7915(&dev->mt76)) {
+ if (!dev->dbdc_support)
+ sts_160 /= 2;
+ else
+ sts_160 = 0;
+ }
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -748,8 +779,9 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
- c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ if (sts_160)
+ c |= IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
elem->phy_cap_info[5] &= ~c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
@@ -765,8 +797,9 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (sts_160)
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
elem->phy_cap_info[4] |= c;
/* do not support NG16 due to spec D4.0 changes subcarrier idx */
@@ -778,11 +811,11 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[6] |= c;
- if (nss < 2)
+ if (sts < 2)
return;
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
- elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+ elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
if (vif != NL80211_IFTYPE_AP)
return;
@@ -791,12 +824,13 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
/* num_snd_dim
- * for mt7915, max supported nss is 2 for bw > 80MHz
+ * for mt7915, max supported sts is 2 for bw > 80MHz and 0 if dbdc
*/
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
- nss - 1) |
- FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
- nss_160 - 1);
+ sts - 1);
+ if (sts_160)
+ c |= FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ sts_160 - 1);
elem->phy_cap_info[5] |= c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
@@ -836,16 +870,19 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data)
{
struct mt7915_dev *dev = phy->dev;
- int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
+ int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
u8 nss_160;
- /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
- if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+ if (!is_mt7915(&dev->mt76))
+ nss_160 = nss;
+ else if (!dev->dbdc_support)
+ /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
nss_160 = nss / 2;
else
- nss_160 = nss;
+ /* Can't do 160MHz with mt7915 dbdc */
+ nss_160 = 0;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -891,11 +928,14 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- else
+ else if (nss_160)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ else
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
he_cap_elem->phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
@@ -949,9 +989,11 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ if (nss_160)
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
@@ -969,7 +1011,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
- mt7915_set_stream_he_txbf_caps(dev, he_cap, i, nss);
+ mt7915_set_stream_he_txbf_caps(phy, he_cap, i);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
@@ -1078,6 +1120,8 @@ int mt7915_register_device(struct mt7915_dev *dev)
init_waitqueue_head(&dev->reset_wait);
INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
+ INIT_WORK(&dev->dump_work, mt7915_mac_dump_work);
+ mutex_init(&dev->dump_mutex);
dev->dbdc_support = mt7915_band_config(dev);
@@ -1118,7 +1162,15 @@ int mt7915_register_device(struct mt7915_dev *dev)
goto unreg_thermal;
}
- mt7915_init_debugfs(&dev->phy);
+ dev->recovery.hw_init_done = true;
+
+ ret = mt7915_init_debugfs(&dev->phy);
+ if (ret)
+ goto unreg_thermal;
+
+ ret = mt7915_coredump_register(dev);
+ if (ret)
+ goto unreg_thermal;
return 0;
@@ -1137,6 +1189,7 @@ free_phy2:
void mt7915_unregister_device(struct mt7915_dev *dev)
{
mt7915_unregister_ext_phy(dev);
+ mt7915_coredump_unregister(dev);
mt7915_unregister_thermal(&dev->phy);
mt76_unregister_device(&dev->mt76);
mt7915_stop_hardware(dev);
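
A small standalone sketch (hypothetical helper, not the driver function) of the 160 MHz sounding-stream rule applied in mt7915_set_stream_he_txbf_caps() and mt7915_init_he_caps() above: mt7915 halves the stream count at 160 MHz without DBDC, drops 160 MHz entirely with DBDC, and other chips keep the full count.

#include <stdbool.h>
#include <stdio.h>

static int sts_160_for(int sts, bool is_mt7915, bool dbdc)
{
	if (!is_mt7915)
		return sts;		/* mt7916/mt7986: no reduction */
	return dbdc ? 0 : sts / 2;	/* mt7915: half, or none with dbdc */
}

int main(void)
{
	printf("mt7915 4T4R:      %d\n", sts_160_for(4, true, false));	/* 2 */
	printf("mt7915 dbdc 2T2R: %d\n", sts_160_for(2, true, true));	/* 0 */
	printf("mt7916 2T2R:      %d\n", sts_160_for(2, false, false));	/* 2 */
	return 0;
}
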
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index a4bcc617c1a3..f0d5a3603902 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -3,12 +3,13 @@
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
+#include "coredump.h"
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"
-#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+#define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2)
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
@@ -118,6 +119,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
bool clear = false;
u32 addr, val;
u16 idx;
+ s8 rssi[4];
u8 bw;
spin_lock_bh(&dev->sta_poll_lock);
@@ -131,6 +133,8 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
spin_unlock_bh(&dev->sta_poll_lock);
idx = msta->wcid.idx;
+
+ /* refresh peer's airtime reporting */
addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -161,9 +165,9 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt76_connac_lmac_mapping(i);
- u32 tx_cur = tx_time[q];
- u32 rx_cur = rx_time[q];
+ u8 queue = mt76_connac_lmac_mapping(i);
+ u32 tx_cur = tx_time[queue];
+ u32 rx_cur = rx_time[queue];
u8 tid = ac_to_tid[i];
if (!tx_cur && !rx_cur)
@@ -209,13 +213,69 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
else
rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
}
+
+ /* get signal strength of resp frames (CTS/BA/ACK) */
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30);
+ val = mt76_rr(dev, addr);
+
+ rssi[0] = to_rssi(GENMASK(7, 0), val);
+ rssi[1] = to_rssi(GENMASK(15, 8), val);
+ rssi[2] = to_rssi(GENMASK(23, 16), val);
+ rssi[3] = to_rssi(GENMASK(31, 14), val);
+
+ msta->ack_signal =
+ mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
+
+ ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
}
rcu_read_unlock();
}
+void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ u32 addr;
+
+ addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
+ if (enable)
+ mt76_set(dev, addr, BIT(5));
+ else
+ mt76_clear(dev, addr, BIT(5));
+}
+
+static void
+mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
+ struct mt7915_sta *msta, struct sk_buff *skb,
+ u32 info)
+{
+ struct ieee80211_vif *vif;
+ struct wireless_dev *wdev;
+
+ if (!msta || !msta->vif)
+ return;
+
+ if (!(q->flags & MT_QFLAG_WED) ||
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX)
+ return;
+
+ if (!(info & MT_DMA_INFO_PPE_VLD))
+ return;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ wdev = ieee80211_vif_to_wdev(vif);
+ skb->dev = wdev->netdev;
+
+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
+ FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
+ FIELD_GET(MT_DMA_PPE_ENTRY, info));
+}
+
static int
-mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
+ enum mt76_rxq_id q, u32 *info)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -242,7 +302,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
- if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
+ if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) {
mphy = dev->mt76.phys[MT_BAND1];
if (!mphy)
return -EINVAL;
@@ -482,6 +542,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
} else {
status->flag |= RX_FLAG_8023;
+ mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
+ *info);
}
if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
@@ -513,7 +575,7 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
int i;
band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
- if (band_idx && !phy->band_idx) {
+ if (band_idx && !phy->mt76->band_idx) {
phy = mt7915_ext_phy(dev);
if (!phy)
goto out;
@@ -905,17 +967,19 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
- if (WARN_ON_ONCE((void *)&tx_info[total >> v3] > end))
- return;
for (cur_info = tx_info; count < total; cur_info++) {
- u32 msdu, info = le32_to_cpu(*cur_info);
+ u32 msdu, info;
u8 i;
+ if (WARN_ON_ONCE((void *)cur_info >= end))
+ return;
+
/*
* 1'b1: new wcid pair.
* 1'b0: msdu_id with the same 'wcid pair' as above.
*/
+ info = le32_to_cpu(*cur_info);
if (info & MT_TX_FREE_PAIR) {
struct mt7915_sta *msta;
struct mt76_wcid *wcid;
@@ -1063,7 +1127,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
}
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *info)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
@@ -1097,7 +1161,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_NORMAL:
- if (!mt7915_mac_fill_rx(dev, skb)) {
+ if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
mt76_rx(&dev->mt76, q, skb);
return;
}
@@ -1111,7 +1175,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);
+ u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx);
mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
mt76_set(dev, reg, BIT(11) | BIT(9));
@@ -1123,19 +1187,15 @@ void mt7915_mac_reset_counters(struct mt7915_phy *phy)
int i;
for (i = 0; i < 4; i++) {
- mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
- mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
+ mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i));
}
- i = 0;
phy->mt76->survey_time = ktime_get_boottime();
- if (phy->band_idx)
- i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
-
- memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
+ memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
/* reset airtime counters */
- mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx),
MT_WF_RMAC_MIB_RXTIME_CLR);
mt7915_mcu_get_chan_mib_info(phy, true);
@@ -1151,7 +1211,8 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
- int offset;
+ u8 band = phy->mt76->band_idx;
+ int eifs_ofdm = 360, sifs = 10, offset;
bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
@@ -1161,7 +1222,7 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
coverage_class = max_t(s16, dev->phy.coverage_class,
ext_phy->coverage_class);
- mt76_set(dev, MT_ARB_SCR(phy->band_idx),
+ mt76_set(dev, MT_ARB_SCR(band),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
udelay(1);
@@ -1169,39 +1230,48 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
- mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
- mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
- mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
- FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
+ if (!is_mt7915(&dev->mt76)) {
+ if (!a_band) {
+ mt76_wr(dev, MT_TMAC_ICR1(band),
+ FIELD_PREP(MT_IFS_EIFS_CCK, 314));
+ eifs_ofdm = 78;
+ } else {
+ eifs_ofdm = 84;
+ }
+ } else if (a_band) {
+ sifs = 16;
+ }
+
+ mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(band),
+ FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
FIELD_PREP(MT_IFS_RIFS, 2) |
- FIELD_PREP(MT_IFS_SIFS, 10) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
FIELD_PREP(MT_IFS_SLOT, phy->slottime));
- mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
- FIELD_PREP(MT_IFS_EIFS_CCK, 314));
-
if (phy->slottime < 20 || a_band)
val = MT7915_CFEND_RATE_DEFAULT;
else
val = MT7915_CFEND_RATE_11B;
- mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
- mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
+ mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(band),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
-void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
+void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band)
{
u32 reg;
- reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
- MT_WF_PHY_RXTD12_MT7916(ext_phy);
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) :
+ MT_WF_PHY_RXTD12_MT7916(band);
mt76_set(dev, reg,
MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
MT_WF_PHY_RXTD12_IRPI_SW_CLR);
- reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
- MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) :
+ MT_WF_PHY_RX_CTRL1_MT7916(band);
mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
@@ -1239,7 +1309,7 @@ void mt7915_update_channel(struct mt76_phy *mphy)
mt7915_mcu_get_chan_mib_info(phy, false);
- nf = mt7915_phy_get_nf(phy, phy->band_idx);
+ nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx);
if (!phy->noise)
phy->noise = nf << 4;
else if (nf)
@@ -1254,7 +1324,7 @@ mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
bool ret;
ret = wait_event_timeout(dev->reset_wait,
- (READ_ONCE(dev->reset_state) & state),
+ (READ_ONCE(dev->recovery.state) & state),
MT7915_RESET_TIMEOUT);
WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
@@ -1295,85 +1365,180 @@ mt7915_update_beacons(struct mt7915_dev *dev)
mt7915_update_vif_beacon, mphy_ext->hw);
}
-static void
-mt7915_dma_reset(struct mt7915_dev *dev)
+void mt7915_tx_token_put(struct mt7915_dev *dev)
{
- struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
- u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
- int i;
+ struct mt76_txwi_cache *txwi;
+ int id;
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ spin_lock_bh(&dev->mt76.token_lock);
+ idr_for_each_entry(&dev->mt76.token, txwi, id) {
+ mt7915_txwi_free(dev, txwi, NULL, NULL);
+ dev->mt76.token_count--;
+ }
+ spin_unlock_bh(&dev->mt76.token_lock);
+ idr_destroy(&dev->mt76.token);
+}
+
+static int
+mt7915_mac_restart(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy2;
+ struct mt76_phy *ext_phy;
+ struct mt76_dev *mdev = &dev->mt76;
+ int i, ret;
+
+ ext_phy = dev->mt76.phys[MT_BAND1];
+ phy2 = ext_phy ? ext_phy->priv : NULL;
- if (is_mt7915(&dev->mt76))
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN);
if (dev->hif2) {
- mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
+ }
- if (is_mt7915(&dev->mt76))
- mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ if (dev_is_pci(mdev->dev)) {
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
+ }
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ if (ext_phy) {
+ set_bit(MT76_RESET, &ext_phy->state);
+ set_bit(MT76_MCU_RESET, &ext_phy->state);
}
- usleep_range(1000, 2000);
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (ext_phy)
+ mt76_txq_schedule_all(ext_phy);
- for (i = 0; i < __MT_TXQ_MAX; i++) {
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
- if (mphy_ext)
- mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
+ /* disable all tx/rx napi */
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ mt76_for_each_q_rx(mdev, i) {
+ if (mdev->q_rx[i].ndesc)
+ napi_disable(&dev->mt76.napi[i]);
}
+ napi_disable(&dev->mt76.tx_napi);
- for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+ /* token reinit */
+ mt7915_tx_token_put(dev);
+ idr_init(&dev->mt76.token);
- mt76_for_each_q_rx(&dev->mt76, i)
- mt76_queue_rx_reset(dev, i);
+ mt7915_dma_reset(dev, true);
- mt76_tx_status_check(&dev->mt76, true);
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ if (mdev->q_rx[i].ndesc) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+ }
+ local_bh_enable();
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
- /* re-init prefetch settings after reset */
- mt7915_dma_prefetch(dev);
+ mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- if (is_mt7915(&dev->mt76))
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN |
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
+ }
+ if (dev_is_pci(mdev->dev)) {
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ if (dev->hif2)
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ }
+
+ /* load firmware */
+ ret = mt7915_mcu_init_firmware(dev);
+ if (ret)
+ goto out;
+
+ /* set the necessary init items */
+ ret = mt7915_mcu_set_eeprom(dev);
+ if (ret)
+ goto out;
+
+ mt7915_mac_init(dev);
+ mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ ret = mt7915_txbf_init(dev);
- if (is_mt7915(&dev->mt76))
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN |
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
+ ret = mt7915_run(dev->mphy.hw);
+ if (ret)
+ goto out;
+ }
+
+ if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
+ ret = mt7915_run(ext_phy->hw);
+ if (ret)
+ goto out;
}
+
+out:
+ /* reset done */
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ if (phy2)
+ clear_bit(MT76_RESET, &phy2->mt76->state);
+
+ local_bh_disable();
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+ local_bh_enable();
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return ret;
}
-void mt7915_tx_token_put(struct mt7915_dev *dev)
+static void
+mt7915_mac_full_reset(struct mt7915_dev *dev)
{
- struct mt76_txwi_cache *txwi;
- int id;
+ struct mt76_phy *ext_phy;
+ int i;
- spin_lock_bh(&dev->mt76.token_lock);
- idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7915_txwi_free(dev, txwi, NULL, NULL);
- dev->mt76.token_count--;
+ ext_phy = dev->mt76.phys[MT_BAND1];
+
+ dev->recovery.hw_full_reset = true;
+
+ wake_up(&dev->mt76.mcu.wait);
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
+
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
+ if (ext_phy)
+ cancel_delayed_work_sync(&ext_phy->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ for (i = 0; i < 10; i++) {
+ if (!mt7915_mac_restart(dev))
+ break;
}
- spin_unlock_bh(&dev->mt76.token_lock);
- idr_destroy(&dev->mt76.token);
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (i == 10)
+ dev_err(dev->mt76.dev, "chip full reset failed\n");
+
+ ieee80211_restart_hw(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_restart_hw(ext_phy->hw);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
+
+ dev->recovery.hw_full_reset = false;
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+ MT7915_WATCHDOG_TIME);
+ if (ext_phy)
+ ieee80211_queue_delayed_work(ext_phy->hw,
+ &ext_phy->mac_work,
+ MT7915_WATCHDOG_TIME);
}
/* system error recovery */
@@ -1388,7 +1553,33 @@ void mt7915_mac_reset_work(struct work_struct *work)
ext_phy = dev->mt76.phys[MT_BAND1];
phy2 = ext_phy ? ext_phy->priv : NULL;
- if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
+ /* chip full reset */
+ if (dev->recovery.restart) {
+ /* disable WA/WM WDT */
+ mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
+ MT_MCU_CMD_WDT_MASK);
+
+ if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
+ dev->recovery.wa_reset_count++;
+ else
+ dev->recovery.wm_reset_count++;
+
+ mt7915_mac_full_reset(dev);
+
+ /* enable mcu irq */
+ mt7915_irq_enable(dev, MT_INT_MCU_CMD);
+ mt7915_irq_disable(dev, 0);
+
+ /* enable WA/WM WDT */
+ mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
+
+ dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
+ dev->recovery.restart = false;
+ return;
+ }
+
+ /* chip partial reset */
+ if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
return;
ieee80211_stop_queues(mt76_hw(dev));
@@ -1413,7 +1604,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
- mt7915_dma_reset(dev);
+ mt7915_dma_reset(dev, false);
mt7915_tx_token_put(dev);
idr_init(&dev->mt76.token);
@@ -1462,132 +1653,227 @@ void mt7915_mac_reset_work(struct work_struct *work)
MT7915_WATCHDOG_TIME);
}
+/* firmware coredump */
+void mt7915_mac_dump_work(struct work_struct *work)
+{
+ const struct mt7915_mem_region *mem_region;
+ struct mt7915_crash_data *crash_data;
+ struct mt7915_dev *dev;
+ struct mt7915_mem_hdr *hdr;
+ size_t buf_len;
+ int i;
+ u32 num;
+ u8 *buf;
+
+ dev = container_of(work, struct mt7915_dev, dump_work);
+
+ mutex_lock(&dev->dump_mutex);
+
+ crash_data = mt7915_coredump_new(dev);
+ if (!crash_data) {
+ mutex_unlock(&dev->dump_mutex);
+ goto skip_coredump;
+ }
+
+ mem_region = mt7915_coredump_get_mem_layout(dev, &num);
+ if (!mem_region || !crash_data->memdump_buf_len) {
+ mutex_unlock(&dev->dump_mutex);
+ goto skip_memdump;
+ }
+
+ buf = crash_data->memdump_buf;
+ buf_len = crash_data->memdump_buf_len;
+
+ /* dumping memory content... */
+ memset(buf, 0, buf_len);
+ for (i = 0; i < num; i++) {
+ if (mem_region->len > buf_len) {
+ dev_warn(dev->mt76.dev, "%s len %lu is too large\n",
+ mem_region->name,
+ (unsigned long)mem_region->len);
+ break;
+ }
+
+ /* reserve space for the header */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ mt7915_memcpy_fromio(dev, buf, mem_region->start,
+ mem_region->len);
+
+ hdr->start = mem_region->start;
+ hdr->len = mem_region->len;
+
+ if (!mem_region->len)
+ /* note: the header remains, just with zero length */
+ break;
+
+ buf += mem_region->len;
+ buf_len -= mem_region->len;
+
+ mem_region++;
+ }
+
+ mutex_unlock(&dev->dump_mutex);
+
+skip_memdump:
+ mt7915_coredump_submit(dev);
+skip_coredump:
+ queue_work(dev->mt76.wq, &dev->reset_work);
+}
+
+void mt7915_reset(struct mt7915_dev *dev)
+{
+ if (!dev->recovery.hw_init_done)
+ return;
+
+ if (dev->recovery.hw_full_reset)
+ return;
+
+ /* wm/wa exception: do full recovery */
+ if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
+ dev->recovery.restart = true;
+ dev_info(dev->mt76.dev,
+ "%s indicated firmware crash, attempting recovery\n",
+ wiphy_name(dev->mt76.hw->wiphy));
+
+ mt7915_irq_disable(dev, MT_INT_MCU_CMD);
+ queue_work(dev->mt76.wq, &dev->dump_work);
+ return;
+ }
+
+ queue_work(dev->mt76.wq, &dev->reset_work);
+ wake_up(&dev->reset_wait);
+}
+
void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
- int i, aggr0, aggr1, cnt;
+ int i, aggr0 = 0, aggr1, cnt;
+ u8 band = phy->mt76->band_idx;
u32 val;
- cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR3(band));
mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR4(band));
mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR5(band));
mib->rx_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR6(band));
mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR7(band));
mib->rx_vector_mismatch_cnt +=
FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR8(band));
mib->rx_delimiter_fail_cnt +=
FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR10(band));
mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR11(band));
mib->rx_len_mismatch_cnt +=
FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR12(band));
mib->tx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR13(band));
mib->tx_stop_q_empty_cnt +=
FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR14(band));
mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR15(band));
mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR16(band));
mib->primary_cca_busy_time +=
FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR17(band));
mib->secondary_cca_busy_time +=
FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR18(band));
mib->primary_energy_detect_time +=
FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR19(band));
mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR20(band));
mib->ofdm_mdrdy_time +=
FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR21(band));
mib->green_mdrdy_time +=
FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR22(band));
mib->rx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR23(band));
mib->rx_ampdu_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR24(band));
mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR25(band));
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR27(band));
mib->tx_rwp_fail_cnt +=
FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR28(band));
mib->tx_rwp_need_cnt +=
FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR29(band));
mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDRVEC(band));
mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR31(band));
mib->rx_ba_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band));
mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
- cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_DR8(band));
mib->tx_mu_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_DR9(band));
mib->tx_mu_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_DR11(band));
mib->tx_su_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
+ cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band));
mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
@@ -1598,44 +1884,43 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_amsdu_cnt += cnt;
}
- aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
if (is_mt7915(&dev->mt76)) {
- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
- val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
+ for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4)));
mib->ba_miss_cnt +=
FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
mib->ack_fail_cnt +=
FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
- val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4)));
mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
mib->rts_retries_cnt +=
FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
- dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr0++] += val >> 16;
+ val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
+ phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
+ phy->mt76->aggr_stats[aggr0++] += val >> 16;
- val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
- dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i));
+ phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
+ phy->mt76->aggr_stats[aggr1++] += val >> 16;
}
- cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR32(band));
mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR33(band));
mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
- cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band));
mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
- cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band));
mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band));
mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
@@ -1643,51 +1928,51 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
} else {
for (i = 0; i < 2; i++) {
/* rts count */
- val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2)));
mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
/* rts retry count */
- val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2)));
mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
/* ba miss count */
- val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
+ val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2)));
mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
/* ack fail count */
- val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
+ val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2)));
mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
}
for (i = 0; i < 8; i++) {
- val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
- dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
- dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
+ val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
+ phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
+ phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
}
- cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR32(band));
mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
- cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_BFCR7(band));
mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
- cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_BFCR2(band));
mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
- cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_BFCR0(band));
mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
- cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_BFCR1(band));
mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
}
@@ -1696,7 +1981,6 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
static void mt7915_mac_severe_check(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
u32 trb;
if (!phy->omac_mask)
@@ -1706,7 +1990,7 @@ static void mt7915_mac_severe_check(struct mt7915_phy *phy)
* stopping Rx, so check status periodically to see if TRB hardware
* requires minimal recovery.
*/
- trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));
+ trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx));
if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
@@ -1714,7 +1998,7 @@ static void mt7915_mac_severe_check(struct mt7915_phy *phy)
FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
trb == phy->trb_ts)
mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
- ext_phy);
+ phy->mt76->band_idx);
phy->trb_ts = trb;
}
@@ -1816,6 +2100,13 @@ static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
if (err < 0)
return err;
+ if (is_mt7915(&dev->mt76)) {
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
+ 0, dev->dbdc_support ? 2 : 0);
+ if (err < 0)
+ return err;
+ }
+
return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
MT_RX_SEL0, 1);
}
@@ -1827,16 +2118,16 @@ static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
int err;
/* start CAC */
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
if (err < 0)
return err;
- err = mt7915_dfs_start_rdd(dev, phy->band_idx);
+ err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
if (err < 0)
return err;
- phy->rdd_state |= BIT(phy->band_idx);
+ phy->rdd_state |= BIT(phy->mt76->band_idx);
if (!is_mt7915(&dev->mt76))
return 0;
@@ -1921,7 +2212,7 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
return 0;
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
- phy->band_idx, MT_RX_SEL0, 0);
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
@@ -1932,10 +2223,18 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
stop:
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
- phy->band_idx, MT_RX_SEL0, 0);
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
if (err < 0)
return err;
+ if (is_mt7915(&dev->mt76)) {
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
+ phy->mt76->band_idx, 0,
+ dev->dbdc_support ? 2 : 0);
+ if (err < 0)
+ return err;
+ }
+
mt7915_dfs_stop_radar_detector(phy);
phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 89b519cfd14c..0511d6a505b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -20,45 +20,45 @@ static bool mt7915_dev_running(struct mt7915_dev *dev)
return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
}
-static int mt7915_start(struct ieee80211_hw *hw)
+int mt7915_run(struct ieee80211_hw *hw)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool running;
int ret;
- flush_work(&dev->init_work);
-
- mutex_lock(&dev->mt76.mutex);
-
running = mt7915_dev_running(dev);
if (!running) {
- ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
+ ret = mt76_connac_mcu_set_pm(&dev->mt76,
+ dev->phy.mt76->band_idx, 0);
if (ret)
goto out;
- ret = mt7915_mcu_set_mac(dev, 0, true, true);
+ ret = mt7915_mcu_set_mac(dev, dev->phy.mt76->band_idx,
+ true, true);
if (ret)
goto out;
- mt7915_mac_enable_nf(dev, 0);
+ mt7915_mac_enable_nf(dev, dev->phy.mt76->band_idx);
}
- if (phy != &dev->phy || phy->band_idx) {
- ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
+ if (phy != &dev->phy) {
+ ret = mt76_connac_mcu_set_pm(&dev->mt76,
+ phy->mt76->band_idx, 0);
if (ret)
goto out;
- ret = mt7915_mcu_set_mac(dev, 1, true, true);
+ ret = mt7915_mcu_set_mac(dev, phy->mt76->band_idx,
+ true, true);
if (ret)
goto out;
- mt7915_mac_enable_nf(dev, 1);
+ mt7915_mac_enable_nf(dev, phy->mt76->band_idx);
}
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
- phy != &dev->phy);
+ phy->mt76->band_idx);
if (ret)
goto out;
@@ -80,6 +80,18 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_reset_counters(phy);
out:
+ return ret;
+}
+
+static int mt7915_start(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ int ret;
+
+ flush_work(&dev->init_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = mt7915_run(hw);
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -99,13 +111,13 @@ static void mt7915_stop(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (phy != &dev->phy) {
- mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
- mt7915_mcu_set_mac(dev, 1, false, false);
+ mt76_connac_mcu_set_pm(&dev->mt76, phy->mt76->band_idx, 1);
+ mt7915_mcu_set_mac(dev, phy->mt76->band_idx, false, false);
}
if (!mt7915_dev_running(dev)) {
- mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
- mt7915_mcu_set_mac(dev, 0, false, false);
+ mt76_connac_mcu_set_pm(&dev->mt76, dev->phy.mt76->band_idx, 1);
+ mt7915_mcu_set_mac(dev, dev->phy.mt76->band_idx, false, false);
}
mutex_unlock(&dev->mt76.mutex);
@@ -209,7 +221,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
}
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
- mvif->mt76.band_idx = phy->band_idx;
+ mvif->mt76.band_idx = phy->mt76->band_idx;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
if (ext_phy)
@@ -432,7 +444,6 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
- bool band = phy != &dev->phy;
int ret;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -460,6 +471,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+ bool band = phy->mt76->band_idx;
if (!enabled)
phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
@@ -498,7 +510,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
- bool band = phy != &dev->phy;
+ bool band = phy->mt76->band_idx;
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -593,10 +605,11 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_mcu_add_sta(dev, vif, NULL, join);
}
- if (changed & BSS_CHANGED_ASSOC) {
+ if (changed & BSS_CHANGED_ASSOC)
mt7915_mcu_add_bss_info(phy, vif, vif->cfg.assoc);
- mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
- }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
@@ -617,7 +630,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_mcu_set_tx(dev, vif);
if (changed & BSS_CHANGED_HE_OBSS_PD)
- mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
+ mt7915_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR)
mt7915_update_bss_color(hw, vif, &info->he_bss_color);
@@ -665,6 +678,8 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
+ ewma_avg_signal_init(&msta->avg_ack_signal);
+
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -732,7 +747,8 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
int ret;
mutex_lock(&dev->mt76.mutex);
- ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val,
+ phy->mt76->band_idx);
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -835,7 +851,7 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
- bool band = phy != &dev->phy;
+ bool band = phy->mt76->band_idx;
union {
u64 t64;
u32 t32[2];
@@ -880,7 +896,7 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
- bool band = phy != &dev->phy;
+ bool band = phy->mt76->band_idx;
union {
u64 t64;
u32 t32[2];
@@ -911,7 +927,7 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
- bool band = phy != &dev->phy;
+ bool band = phy->mt76->band_idx;
union {
u64 t64;
u32 t32[2];
@@ -953,22 +969,21 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
int max_nss = hweight8(hw->wiphy->available_antennas_tx);
- bool ext_phy = phy != &dev->phy;
+ u8 chainshift = dev->chainshift;
+ u8 band = phy->mt76->band_idx;
if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
return -EINVAL;
- if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
- tx_ant = BIT(ffs(tx_ant) - 1) - 1;
-
mutex_lock(&dev->mt76.mutex);
phy->mt76->antenna_mask = tx_ant;
- if (ext_phy)
- tx_ant <<= dev->chainshift;
-
- phy->mt76->chainmask = tx_ant;
+ /* handle a variant of mt7916 which has 3T3R but nss2 on 5 GHz band */
+ if (is_mt7916(&dev->mt76) && band && hweight8(tx_ant) == max_nss)
+ phy->mt76->chainmask = (dev->chainmask >> chainshift) << chainshift;
+ else
+ phy->mt76->chainmask = tx_ant << (chainshift * band);
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
@@ -1026,7 +1041,21 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
sinfo->tx_retries = msta->wcid.stats.tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+
+ if (mtk_wed_get_rx_capa(&phy->dev->mt76.mmio.wed)) {
+ sinfo->rx_bytes = msta->wcid.stats.rx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
+
+ sinfo->rx_packets = msta->wcid.stats.rx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
+ }
}
+
+ sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
+
+ sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
@@ -1111,6 +1140,39 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
+static int mt7915_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ s16 txpower = sta->deflink.txpwr.power;
+ int ret;
+
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC)
+ txpower = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ /* NOTE: temporarily use 0 as minimum limit, which is a
+ * global setting and will be applied to all stations.
+ */
+ ret = mt7915_mcu_set_txpower_frame_min(phy, 0);
+ if (ret)
+ goto out;
+
+ /* This only applies to data frames while pushing traffic,
+ * whereas the management frames or other packets that are
+ * using fixed rate can be configured via TxD.
+ */
+ ret = mt7915_mcu_set_txpower_frame(phy, vif, sta, txpower);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_ampdu_cnt",
"tx_stop_q_empty_cnt",
@@ -1258,7 +1320,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
- int i, n, ei = 0;
+ int i, ei = 0;
mutex_lock(&dev->mt76.mutex);
@@ -1274,9 +1336,8 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
data[ei++] = mib->tx_pkt_ibf_cnt;
/* Tx ampdu stat */
- n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
- data[ei++] = dev->mt76.aggr_stats[i + n];
+ data[ei++] = phy->mt76->aggr_stats[i];
data[ei++] = phy->mib.ba_miss_cnt;
@@ -1431,7 +1492,7 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
path->dev = ctx->dev;
path->mtk_wdma.wdma_idx = wed->wdma_idx;
path->mtk_wdma.bss = mvif->mt76.idx;
- path->mtk_wdma.wcid = msta->wcid.idx;
+ path->mtk_wdma.wcid = is_mt7915(&dev->mt76) ? msta->wcid.idx : 0x3ff;
path->mtk_wdma.queue = phy != &dev->phy;
ctx->dev = NULL;
@@ -1477,6 +1538,7 @@ const struct ieee80211_ops mt7915_ops = {
.set_bitrate_mask = mt7915_set_bitrate_mask,
.set_coverage_class = mt7915_set_coverage_class,
.sta_statistics = mt7915_sta_statistics,
+ .sta_set_txpwr = mt7915_sta_set_txpwr,
.sta_set_4addr = mt7915_sta_set_4addr,
.sta_set_decap_offload = mt7915_sta_set_decap_offload,
.add_twt_setup = mt7915_mac_add_twt_setup,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 8d297e4aa7d4..b2652de082ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -32,6 +32,10 @@
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+static bool sr_scene_detect = true;
+module_param(sr_scene_detect, bool, 0644);
+MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm");
+
static u8
mt7915_mcu_get_sta_nss(u16 mcs_map)
{
@@ -228,7 +232,8 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
- if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ if ((c->band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -247,7 +252,8 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
- if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ if ((t->ctrl.band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
phy = (struct mt7915_phy *)mphy->priv;
@@ -262,7 +268,8 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ if ((r->band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
if (r->band_idx == MT_RX_SEL2)
@@ -319,7 +326,7 @@ mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
b = (struct mt7915_mcu_bcc_notify *)skb->data;
- if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ if ((b->band_idx && !dev->phy.mt76->band_idx) && dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -485,7 +492,7 @@ static void
mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct mt7915_phy *phy)
{
- int max_nss = hweight8(phy->mt76->chainmask);
+ int max_nss = hweight8(phy->mt76->antenna_mask);
struct bss_info_ra *ra;
struct tlv *tlv;
@@ -595,7 +602,7 @@ mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif,
.mode = !!mask || enable,
.entry_count = 1,
.write = 1,
- .band = phy != &dev->phy,
+ .band = phy->mt76->band_idx,
.index = idx * 2 + bssid,
};
@@ -1131,7 +1138,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
- bf->ncol_bw160 = nss_mcs;
+ bf->ncol_gt_bw80 = nss_mcs;
}
if (pe->phy_cap_info[0] &
@@ -1139,10 +1146,10 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
- if (bf->ncol_bw160)
- bf->ncol_bw160 = min_t(u8, bf->ncol_bw160, nss_mcs);
+ if (bf->ncol_gt_bw80)
+ bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs);
else
- bf->ncol_bw160 = nss_mcs;
+ bf->ncol_gt_bw80 = nss_mcs;
}
snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
@@ -1150,7 +1157,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
pe->phy_cap_info[4]);
- bf->nrow_bw160 = min_t(int, snd_dim, sts);
+ bf->nrow_gt_bw80 = min_t(int, snd_dim, sts);
}
static void
@@ -1306,6 +1313,9 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
case RATE_PARAM_MMPS_UPDATE:
ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
break;
+ case RATE_PARAM_SPE_UPDATE:
+ ra->spe_idx = *(u8 *)data;
+ break;
default:
break;
}
@@ -1349,6 +1359,18 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
static int
+mt7915_mcu_set_spe_idx(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask);
+
+ return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &spe_idx,
+ RATE_PARAM_SPE_UPDATE);
+}
+
+static int
mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -1435,7 +1457,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
return ret;
}
- return 0;
+ return mt7915_mcu_set_spe_idx(dev, vif, sta);
}
static void
@@ -1662,10 +1684,32 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return ret;
}
out:
+ ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb);
+ if (ret)
+ return ret;
+
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
}
+int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct {
+ __le32 args[2];
+ } req = {
+ .args[0] = cpu_to_le32(1),
+ .args[1] = cpu_to_le32(6),
+ };
+
+ return mtk_wed_device_update_msg(wed, MTK_WED_WO_CMD_RXCNT_CTRL,
+ &req, sizeof(req));
+#else
+ return 0;
+#endif
+}
+
int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, bool enable)
{
@@ -1674,7 +1718,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
struct {
struct req_hdr {
u8 omac_idx;
- u8 dbdc_idx;
+ u8 band_idx;
__le16 tlv_num;
u8 is_tlv_append;
u8 rsv[3];
@@ -1683,13 +1727,13 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
__le16 tag;
__le16 len;
u8 active;
- u8 dbdc_idx;
+ u8 band_idx;
u8 omac_addr[ETH_ALEN];
} __packed tlv;
} data = {
.hdr = {
.omac_idx = mvif->mt76.omac_idx,
- .dbdc_idx = mvif->mt76.band_idx,
+ .band_idx = mvif->mt76.band_idx,
.tlv_num = cpu_to_le16(1),
.is_tlv_append = 1,
},
@@ -1697,7 +1741,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
- .dbdc_idx = mvif->mt76.band_idx,
+ .band_idx = mvif->mt76.band_idx,
},
};
@@ -2151,7 +2195,7 @@ int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms)
u8 band_idx;
} req = {
.cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
- .band_idx = phy->band_idx,
+ .band_idx = phy->mt76->band_idx,
};
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
@@ -2234,18 +2278,10 @@ mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
sizeof(req), true);
}
-int mt7915_mcu_init(struct mt7915_dev *dev)
+int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
{
- static const struct mt76_mcu_ops mt7915_mcu_ops = {
- .headroom = sizeof(struct mt76_connac2_mcu_txd),
- .mcu_skb_send_msg = mt7915_mcu_send_message,
- .mcu_parse_response = mt7915_mcu_parse_response,
- .mcu_restart = mt76_connac_mcu_restart,
- };
int ret;
- dev->mt76.mcu_ops = &mt7915_mcu_ops;
-
/* force firmware operation mode into normal state,
* which should be set before firmware download stage.
*/
@@ -2274,7 +2310,7 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
if (ret)
return ret;
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && is_mt7915(&dev->mt76))
mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
ret = mt7915_mcu_set_mwds(dev, 1);
@@ -2294,6 +2330,20 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
MCU_WA_PARAM_RED, 0, 0);
}
+int mt7915_mcu_init(struct mt7915_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .headroom = sizeof(struct mt76_connac2_mcu_txd),
+ .mcu_skb_send_msg = mt7915_mcu_send_message,
+ .mcu_parse_response = mt7915_mcu_parse_response,
+ .mcu_restart = mt76_connac_mcu_restart,
+ };
+
+ dev->mt76.mcu_ops = &mt7915_mcu_ops;
+
+ return mt7915_mcu_init_firmware(dev);
+}
+
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
@@ -2538,7 +2588,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
req.monitor_central_chan =
ieee80211_frequency_to_channel(chandef->center_freq1);
req.monitor_bw = mt76_connac_chan_bw(chandef);
- req.band_idx = phy != &dev->phy;
+ req.band_idx = phy->mt76->band_idx;
req.scan_mode = 1;
break;
}
@@ -2546,7 +2596,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
req.monitor_chan = chandef->chan->hw_value;
req.monitor_central_chan =
ieee80211_frequency_to_channel(chandef->center_freq1);
- req.band_idx = phy != &dev->phy;
+ req.band_idx = phy->mt76->band_idx;
req.scan_mode = 2;
break;
case CH_SWITCH_BACKGROUND_SCAN_STOP:
@@ -2613,12 +2663,13 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1;
+ u8 band = phy->mt76->band_idx;
struct {
u8 control_ch;
u8 center_ch;
u8 bw;
- u8 tx_streams_num;
- u8 rx_streams; /* mask or num */
+ u8 tx_path_num;
+ u8 rx_path; /* mask or num */
u8 switch_reason;
u8 band_idx;
u8 center_ch2; /* for 80+80 only */
@@ -2634,25 +2685,23 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
.bw = mt76_connac_chan_bw(chandef),
- .tx_streams_num = hweight8(phy->mt76->antenna_mask),
- .rx_streams = phy->mt76->antenna_mask,
- .band_idx = phy->band_idx,
+ .tx_path_num = hweight16(phy->mt76->chainmask),
+ .rx_path = phy->mt76->chainmask >> (dev->chainshift * band),
+ .band_idx = band,
.channel_band = ch_band[chandef->chan->band],
};
#ifdef CONFIG_NL80211_TESTMODE
if (phy->mt76->test.tx_antenna_mask &&
- (phy->mt76->test.state == MT76_TM_STATE_TX_FRAMES ||
- phy->mt76->test.state == MT76_TM_STATE_RX_FRAMES ||
- phy->mt76->test.state == MT76_TM_STATE_TX_CONT)) {
- req.tx_streams_num = fls(phy->mt76->test.tx_antenna_mask);
- req.rx_streams = phy->mt76->test.tx_antenna_mask;
-
- if (phy != &dev->phy)
- req.rx_streams >>= dev->chainshift;
+ mt76_testmode_enabled(phy->mt76)) {
+ req.tx_path_num = fls(phy->mt76->test.tx_antenna_mask);
+ req.rx_path = phy->mt76->test.tx_antenna_mask;
}
#endif
+ if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
+ req.tx_path_num = fls(phy->mt76->antenna_mask);
+
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
@@ -2665,7 +2714,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
req.switch_reason = CH_SWITCH_NORMAL;
if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
- req.rx_streams = hweight8(req.rx_streams);
+ req.rx_path = hweight8(req.rx_path);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
int freq2 = chandef->center_freq2;
@@ -2927,25 +2976,36 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
/* strict order */
static const u32 offs[] = {
- MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME,
- MIB_BUSY_TIME_V2, MIB_TX_TIME_V2, MIB_RX_TIME_V2,
+ MIB_NON_WIFI_TIME,
+ MIB_TX_TIME,
+ MIB_RX_TIME,
+ MIB_OBSS_AIRTIME,
+ MIB_TXOP_INIT_COUNT,
+ /* v2 */
+ MIB_NON_WIFI_TIME_V2,
+ MIB_TX_TIME_V2,
+ MIB_RX_TIME_V2,
MIB_OBSS_AIRTIME_V2
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
- struct mt7915_mcu_mib *res, req[4];
+ struct mt7915_mcu_mib *res, req[5];
struct sk_buff *skb;
int i, ret, start = 0, ofs = 20;
+ u64 cc_tx;
if (!is_mt7915(&dev->mt76)) {
- start = 4;
+ start = 5;
ofs = 0;
}
- for (i = 0; i < 4; i++) {
- req[i].band = cpu_to_le32(phy != &dev->phy);
+ for (i = 0; i < 5; i++) {
+ req[i].band = cpu_to_le32(phy->mt76->band_idx);
req[i].offs = cpu_to_le32(offs[i + start]);
+
+ if (!is_mt7915(&dev->mt76) && i == 3)
+ break;
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
@@ -2955,20 +3015,24 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
res = (struct mt7915_mcu_mib *)(skb->data + ofs);
+#define __res_u64(s) le64_to_cpu(res[s].data)
+ /* subtract Tx backoff time from Tx duration */
+ cc_tx = is_mt7915(&dev->mt76) ? __res_u64(1) - __res_u64(4) : __res_u64(1);
+
if (chan_switch)
goto out;
-#define __res_u64(s) le64_to_cpu(res[s].data)
- state->cc_busy += __res_u64(0) - state_ts->cc_busy;
- state->cc_tx += __res_u64(1) - state_ts->cc_tx;
+ state->cc_tx += cc_tx - state_ts->cc_tx;
state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+ state->cc_busy += __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3) -
+ state_ts->cc_busy;
out:
- state_ts->cc_busy = __res_u64(0);
- state_ts->cc_tx = __res_u64(1);
+ state_ts->cc_tx = cc_tx;
state_ts->cc_bss_rx = __res_u64(2);
state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+ state_ts->cc_busy = __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3);
#undef __res_u64
dev_kfree_skb(skb);
@@ -2982,11 +3046,11 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
struct {
u8 ctrl_id;
u8 action;
- u8 dbdc_idx;
+ u8 band_idx;
u8 rsv[5];
} req = {
.ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
- .dbdc_idx = phy != &dev->phy,
+ .band_idx = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
@@ -3005,7 +3069,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
u8 rsv[2];
} __packed req = {
.ctrl = {
- .band_idx = phy->band_idx,
+ .band_idx = phy->mt76->band_idx,
},
};
int level;
@@ -3045,28 +3109,103 @@ out:
&req, sizeof(req), false);
}
-int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
+int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower)
{
struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 rsv;
+ u8 band_idx;
+ s8 txpower_min;
+ } __packed req = {
+ .format_id = TX_POWER_LIMIT_FRAME_MIN,
+ .band_idx = phy->mt76->band_idx,
+ .txpower_min = txpower * 2, /* in 0.5 dB units */
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, s8 txpower)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
- struct mt7915_sku_val {
+ struct {
u8 format_id;
- u8 limit_type;
- u8 dbdc_idx;
- s8 val[MT7915_SKU_RATE_NUM];
+ u8 rsv[3];
+ u8 band_idx;
+ s8 txpower_max;
+ __le16 wcid;
+ s8 txpower_offs[48];
} __packed req = {
- .format_id = 4,
- .dbdc_idx = phy != &dev->phy,
+ .format_id = TX_POWER_LIMIT_FRAME,
+ .band_idx = phy->mt76->band_idx,
+ .txpower_max = DIV_ROUND_UP(mphy->txpower_cur, 2),
+ .wcid = cpu_to_le16(msta->wcid.idx),
+ };
+ int ret;
+ s8 txpower_sku[MT7915_SKU_RATE_NUM];
+
+ ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku));
+ if (ret)
+ return ret;
+
+ txpower = mt7915_get_power_bound(phy, txpower);
+ if (txpower > mphy->txpower_cur || txpower < 0)
+ return -EINVAL;
+
+ if (txpower) {
+ u32 offs, len, i;
+
+ if (sta->deflink.ht_cap.ht_supported) {
+ const u8 *sku_len = mt7915_sku_group_len;
+
+ offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM];
+ len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40];
+
+ if (sta->deflink.vht_cap.vht_supported) {
+ offs += len;
+ len = sku_len[SKU_VHT_BW20] * 4;
+
+ if (sta->deflink.he_cap.has_he) {
+ offs += len + sku_len[SKU_HE_RU26] * 3;
+ len = sku_len[SKU_HE_RU242] * 4;
+ }
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++, offs++)
+ req.txpower_offs[i] =
+ DIV_ROUND_UP(txpower - txpower_sku[offs], 2);
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct mt7915_mcu_txpower_sku req = {
+ .format_id = TX_POWER_LIMIT_TABLE,
+ .band_idx = phy->mt76->band_idx,
};
struct mt76_power_limits limits_array;
s8 *la = (s8 *)&limits_array;
- int i, idx, n_chains = hweight8(mphy->antenna_mask);
- int tx_power = hw->conf.power_level * 2;
+ int i, idx;
+ int tx_power;
- tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan,
- tx_power);
- tx_power -= mt76_tx_power_nss_delta(n_chains);
+ tx_power = mt7915_get_power_bound(phy, hw->conf.power_level);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits_array, tx_power);
mphy->txpower_cur = tx_power;
@@ -3085,7 +3224,7 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
}
for (j = 0; j < min_t(u8, mcs_num, len); j++)
- req.val[idx + j] = la[j];
+ req.txpower_sku[idx + j] = la[j];
la += mcs_num;
idx += len;
@@ -3103,14 +3242,14 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
struct {
u8 format_id;
u8 category;
- u8 band;
+ u8 band_idx;
u8 _rsv;
} __packed req = {
- .format_id = 7,
+ .format_id = TX_POWER_LIMIT_INFO,
.category = RATE_POWER_INFO,
- .band = phy != &dev->phy,
+ .band_idx = phy->mt76->band_idx,
};
- s8 res[MT7915_SKU_RATE_NUM][2];
+ s8 txpower_sku[MT7915_SKU_RATE_NUM][2];
struct sk_buff *skb;
int ret, i;
@@ -3120,9 +3259,9 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
if (ret)
return ret;
- memcpy(res, skb->data + 4, sizeof(res));
+ memcpy(txpower_sku, skb->data + 4, sizeof(txpower_sku));
for (i = 0; i < len; i++)
- txpower[i] = res[i][req.band];
+ txpower[i] = txpower_sku[i][req.band_idx];
dev_kfree_skb(skb);
@@ -3157,11 +3296,11 @@ int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
struct mt7915_sku {
u8 format_id;
u8 sku_enable;
- u8 dbdc_idx;
+ u8 band_idx;
u8 rsv;
} __packed req = {
- .format_id = 0,
- .dbdc_idx = phy != &dev->phy,
+ .format_id = TX_POWER_LIMIT_ENABLE,
+ .band_idx = phy->mt76->band_idx,
.sku_enable = enable,
};
@@ -3236,31 +3375,193 @@ int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action)
sizeof(req), true);
}
-int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- bool enable)
+static int
+mt7915_mcu_enable_obss_spr(struct mt7915_phy *phy, u8 action, u8 val)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_sr_ctrl req = {
+ .action = action,
+ .argnum = 1,
+ .band_idx = phy->mt76->band_idx,
+ .val = cpu_to_le32(val),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_set_obss_spr_pd(struct mt7915_phy *phy,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ struct mt7915_mcu_sr_ctrl ctrl;
+ struct {
+ u8 pd_th_non_srg;
+ u8 pd_th_srg;
+ u8 period_offs;
+ u8 rcpi_src;
+ __le16 obss_pd_min;
+ __le16 obss_pd_min_srg;
+ u8 resp_txpwr_mode;
+ u8 txpwr_restrict_mode;
+ u8 txpwr_ref;
+ u8 rsv[3];
+ } __packed param;
+ } __packed req = {
+ .ctrl = {
+ .action = SPR_SET_PARAM,
+ .argnum = 9,
+ .band_idx = phy->mt76->band_idx,
+ },
+ };
+ int ret;
+ u8 max_th = 82, non_srg_max_th = 62;
+
+ /* disable firmware dynamic PD adjustment */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_DPD, false);
+ if (ret)
+ return ret;
+
+ if (he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED)
+ req.param.pd_th_non_srg = max_th;
+ else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ req.param.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset;
+ else
+ req.param.pd_th_non_srg = non_srg_max_th;
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+ req.param.pd_th_srg = max_th - he_obss_pd->max_offset;
+
+ req.param.obss_pd_min = cpu_to_le16(82);
+ req.param.obss_pd_min_srg = cpu_to_le16(82);
+ req.param.txpwr_restrict_mode = 2;
+ req.param.txpwr_ref = 21;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_set_obss_spr_siga(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd)
{
-#define MT_SPR_ENABLE 1
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
+ u8 omac = mvif->mt76.omac_idx;
struct {
- u8 action;
- u8 arg_num;
- u8 band_idx;
- u8 status;
- u8 drop_tx_idx;
- u8 sta_idx; /* 256 sta */
- u8 rsv[2];
- __le32 val;
+ struct mt7915_mcu_sr_ctrl ctrl;
+ struct {
+ u8 omac;
+ u8 rsv[3];
+ u8 flag[20];
+ } __packed siga;
+ } __packed req = {
+ .ctrl = {
+ .action = SPR_SET_SIGA,
+ .argnum = 1,
+ .band_idx = phy->mt76->band_idx,
+ },
+ .siga = {
+ .omac = omac > HW_BSSID_MAX ? omac - 12 : omac,
+ },
+ };
+ int ret;
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED)
+ req.siga.flag[req.siga.omac] = 0xf;
+ else
+ return 0;
+
+ /* switch to normal AP mode */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_MODE, 0);
+ if (ret)
+ return ret;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_set_obss_spr_bitmap(struct mt7915_phy *phy,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ struct mt7915_mcu_sr_ctrl ctrl;
+ struct {
+ __le32 color_l[2];
+ __le32 color_h[2];
+ __le32 bssid_l[2];
+ __le32 bssid_h[2];
+ } __packed bitmap;
} __packed req = {
- .action = MT_SPR_ENABLE,
- .arg_num = 1,
- .band_idx = mvif->mt76.band_idx,
- .val = cpu_to_le32(enable),
+ .ctrl = {
+ .action = SPR_SET_SRG_BITMAP,
+ .argnum = 4,
+ .band_idx = phy->mt76->band_idx,
+ },
};
+ u32 bitmap;
+
+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
+ req.bitmap.color_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap));
+ req.bitmap.color_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
+ req.bitmap.bssid_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap));
+ req.bitmap.bssid_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
sizeof(req), true);
}
+int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ int ret;
+
+ /* enable firmware scene detection algorithms */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_SD, sr_scene_detect);
+ if (ret)
+ return ret;
+
+ /* firmware dynamically adjusts PD threshold so skip manual control */
+ if (sr_scene_detect && !he_obss_pd->enable)
+ return 0;
+
+ /* enable spatial reuse */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE, he_obss_pd->enable);
+ if (ret)
+ return ret;
+
+ if (sr_scene_detect || !he_obss_pd->enable)
+ return 0;
+
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_TX, true);
+ if (ret)
+ return ret;
+
+ /* set SRG/non-SRG OBSS PD threshold */
+ ret = mt7915_mcu_set_obss_spr_pd(phy, he_obss_pd);
+ if (ret)
+ return ret;
+
+ /* set SR prohibit */
+ ret = mt7915_mcu_set_obss_spr_siga(phy, vif, he_obss_pd);
+ if (ret)
+ return ret;
+
+ /* set SRG BSS color/BSSID bitmap */
+ return mt7915_mcu_set_obss_spr_bitmap(phy, he_obss_pd);
+}
+
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate)
{
@@ -3447,8 +3748,8 @@ int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set)
__le32 ofs;
__le32 data;
} __packed req = {
- .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 28))),
- .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(27, 0))),
+ .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 24))),
+ .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(23, 0))),
.data = set ? cpu_to_le32(*val) : 0,
};
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index cd1edf553fc1..29b5434bfdb8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -129,6 +129,17 @@ struct mt7915_mcu_background_chain_ctrl {
u8 rsv[2];
} __packed;
+struct mt7915_mcu_sr_ctrl {
+ u8 action;
+ u8 argnum;
+ u8 band_idx;
+ u8 status;
+ u8 drop_ta_idx;
+ u8 sta_idx; /* 256 sta */
+ u8 rsv[2];
+ __le32 val;
+} __packed;
+
struct mt7915_mcu_eeprom {
u8 buffer_mode;
u8 format;
@@ -160,17 +171,26 @@ struct mt7915_mcu_mib {
enum mt7915_chan_mib_offs {
/* mt7915 */
- MIB_BUSY_TIME = 14,
MIB_TX_TIME = 81,
MIB_RX_TIME,
MIB_OBSS_AIRTIME = 86,
+ MIB_NON_WIFI_TIME,
+ MIB_TXOP_INIT_COUNT,
+
/* mt7916 */
- MIB_BUSY_TIME_V2 = 0,
MIB_TX_TIME_V2 = 6,
MIB_RX_TIME_V2 = 8,
- MIB_OBSS_AIRTIME_V2 = 490
+ MIB_OBSS_AIRTIME_V2 = 490,
+ MIB_NON_WIFI_TIME_V2
};
+struct mt7915_mcu_txpower_sku {
+ u8 format_id;
+ u8 limit_type;
+ u8 band_idx;
+ s8 txpower_sku[MT7915_SKU_RATE_NUM];
+} __packed;
+
struct edca {
u8 queue;
u8 set;
@@ -394,6 +414,7 @@ enum {
RATE_PARAM_FIXED_MCS,
RATE_PARAM_FIXED_GI = 11,
RATE_PARAM_AUTO = 20,
+ RATE_PARAM_SPE_UPDATE = 22,
};
#define RATE_CFG_MCS GENMASK(3, 0)
@@ -406,6 +427,25 @@ enum {
#define RATE_CFG_HE_LTF GENMASK(31, 28)
enum {
+ TX_POWER_LIMIT_ENABLE,
+ TX_POWER_LIMIT_TABLE = 0x4,
+ TX_POWER_LIMIT_INFO = 0x7,
+ TX_POWER_LIMIT_FRAME = 0x11,
+ TX_POWER_LIMIT_FRAME_MIN = 0x12,
+};
+
+enum {
+ SPR_ENABLE = 0x1,
+ SPR_ENABLE_SD = 0x3,
+ SPR_ENABLE_MODE = 0x5,
+ SPR_ENABLE_DPD = 0x23,
+ SPR_ENABLE_TX = 0x25,
+ SPR_SET_SRG_BITMAP = 0x80,
+ SPR_SET_PARAM = 0xc2,
+ SPR_SET_SIGA = 0xdc,
+};
+
+enum {
THERMAL_PROTECT_PARAMETER_CTRL,
THERMAL_PROTECT_BASIC_INFO,
THERMAL_PROTECT_ENABLE,
@@ -447,6 +487,8 @@ enum {
SER_SET_RECOVER_L3_TX_ABORT,
SER_SET_RECOVER_L3_TX_DISABLE,
SER_SET_RECOVER_L3_BF,
+ SER_SET_RECOVER_FULL,
+ SER_SET_SYSTEM_ASSERT,
/* action */
SER_ENABLE = 2,
SER_RECOVER
@@ -474,4 +516,16 @@ enum {
sizeof(struct bss_info_bcn_cont) + \
sizeof(struct bss_info_inband_discovery))
+static inline s8
+mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ int n_chains = hweight8(mphy->antenna_mask);
+
+ txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
+ txpower -= mt76_tx_power_nss_delta(n_chains);
+
+ return txpower;
+}
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 7bd5f6725d7b..0a95c3da241b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -9,53 +9,112 @@
#include "mt7915.h"
#include "mac.h"
#include "../trace.h"
+#include "../dma.h"
+
+static bool wed_enable;
+module_param(wed_enable, bool, 0644);
+MODULE_PARM_DESC(wed_enable, "Enable Wireless Ethernet Dispatch support");
static const u32 mt7915_reg[] = {
- [INT_SOURCE_CSR] = 0xd7010,
- [INT_MASK_CSR] = 0xd7014,
- [INT1_SOURCE_CSR] = 0xd7088,
- [INT1_MASK_CSR] = 0xd708c,
- [INT_MCU_CMD_SOURCE] = 0xd51f0,
- [INT_MCU_CMD_EVENT] = 0x3108,
- [WFDMA0_ADDR] = 0xd4000,
- [WFDMA0_PCIE1_ADDR] = 0xd8000,
- [WFDMA_EXT_CSR_ADDR] = 0xd7000,
- [CBTOP1_PHY_END] = 0x77ffffff,
- [INFRA_MCU_ADDR_END] = 0x7c3fffff,
- [FW_EXCEPTION_ADDR] = 0x219848,
- [SWDEF_BASE_ADDR] = 0x41f200,
+ [INT_SOURCE_CSR] = 0xd7010,
+ [INT_MASK_CSR] = 0xd7014,
+ [INT1_SOURCE_CSR] = 0xd7088,
+ [INT1_MASK_CSR] = 0xd708c,
+ [INT_MCU_CMD_SOURCE] = 0xd51f0,
+ [INT_MCU_CMD_EVENT] = 0x3108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c3fffff,
+ [FW_ASSERT_STAT_ADDR] = 0x219848,
+ [FW_EXCEPT_TYPE_ADDR] = 0x21987c,
+ [FW_EXCEPT_COUNT_ADDR] = 0x219848,
+ [FW_CIRQ_COUNT_ADDR] = 0x216f94,
+ [FW_CIRQ_IDX_ADDR] = 0x216ef8,
+ [FW_CIRQ_LISR_ADDR] = 0x2170ac,
+ [FW_TASK_ID_ADDR] = 0x216f90,
+ [FW_TASK_IDX_ADDR] = 0x216f9c,
+ [FW_TASK_QID1_ADDR] = 0x219680,
+ [FW_TASK_QID2_ADDR] = 0x219760,
+ [FW_TASK_START_ADDR] = 0x219558,
+ [FW_TASK_END_ADDR] = 0x219554,
+ [FW_TASK_SIZE_ADDR] = 0x219560,
+ [FW_LAST_MSG_ID_ADDR] = 0x216f70,
+ [FW_EINT_INFO_ADDR] = 0x219818,
+ [FW_SCHED_INFO_ADDR] = 0x219828,
+ [SWDEF_BASE_ADDR] = 0x41f200,
+ [TXQ_WED_RING_BASE] = 0xd7300,
+ [RXQ_WED_RING_BASE] = 0xd7410,
+ [RXQ_WED_DATA_RING_BASE] = 0xd4500,
};
static const u32 mt7916_reg[] = {
- [INT_SOURCE_CSR] = 0xd4200,
- [INT_MASK_CSR] = 0xd4204,
- [INT1_SOURCE_CSR] = 0xd8200,
- [INT1_MASK_CSR] = 0xd8204,
- [INT_MCU_CMD_SOURCE] = 0xd41f0,
- [INT_MCU_CMD_EVENT] = 0x2108,
- [WFDMA0_ADDR] = 0xd4000,
- [WFDMA0_PCIE1_ADDR] = 0xd8000,
- [WFDMA_EXT_CSR_ADDR] = 0xd7000,
- [CBTOP1_PHY_END] = 0x7fffffff,
- [INFRA_MCU_ADDR_END] = 0x7c085fff,
- [FW_EXCEPTION_ADDR] = 0x022050bc,
- [SWDEF_BASE_ADDR] = 0x411400,
+ [INT_SOURCE_CSR] = 0xd4200,
+ [INT_MASK_CSR] = 0xd4204,
+ [INT1_SOURCE_CSR] = 0xd8200,
+ [INT1_MASK_CSR] = 0xd8204,
+ [INT_MCU_CMD_SOURCE] = 0xd41f0,
+ [INT_MCU_CMD_EVENT] = 0x2108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_ASSERT_STAT_ADDR] = 0x02204c14,
+ [FW_EXCEPT_TYPE_ADDR] = 0x022051a4,
+ [FW_EXCEPT_COUNT_ADDR] = 0x022050bc,
+ [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
+ [FW_CIRQ_IDX_ADDR] = 0x02204f84,
+ [FW_CIRQ_LISR_ADDR] = 0x022050d0,
+ [FW_TASK_ID_ADDR] = 0x0220406c,
+ [FW_TASK_IDX_ADDR] = 0x0220500c,
+ [FW_TASK_QID1_ADDR] = 0x022028c8,
+ [FW_TASK_QID2_ADDR] = 0x02202a38,
+ [FW_TASK_START_ADDR] = 0x0220286c,
+ [FW_TASK_END_ADDR] = 0x02202870,
+ [FW_TASK_SIZE_ADDR] = 0x02202878,
+ [FW_LAST_MSG_ID_ADDR] = 0x02204fe8,
+ [FW_EINT_INFO_ADDR] = 0x0220525c,
+ [FW_SCHED_INFO_ADDR] = 0x0220516c,
+ [SWDEF_BASE_ADDR] = 0x411400,
+ [TXQ_WED_RING_BASE] = 0xd7300,
+ [RXQ_WED_RING_BASE] = 0xd7410,
+ [RXQ_WED_DATA_RING_BASE] = 0xd4540,
};
static const u32 mt7986_reg[] = {
- [INT_SOURCE_CSR] = 0x24200,
- [INT_MASK_CSR] = 0x24204,
- [INT1_SOURCE_CSR] = 0x28200,
- [INT1_MASK_CSR] = 0x28204,
- [INT_MCU_CMD_SOURCE] = 0x241f0,
- [INT_MCU_CMD_EVENT] = 0x54000108,
- [WFDMA0_ADDR] = 0x24000,
- [WFDMA0_PCIE1_ADDR] = 0x28000,
- [WFDMA_EXT_CSR_ADDR] = 0x27000,
- [CBTOP1_PHY_END] = 0x7fffffff,
- [INFRA_MCU_ADDR_END] = 0x7c085fff,
- [FW_EXCEPTION_ADDR] = 0x02204ffc,
- [SWDEF_BASE_ADDR] = 0x411400,
+ [INT_SOURCE_CSR] = 0x24200,
+ [INT_MASK_CSR] = 0x24204,
+ [INT1_SOURCE_CSR] = 0x28200,
+ [INT1_MASK_CSR] = 0x28204,
+ [INT_MCU_CMD_SOURCE] = 0x241f0,
+ [INT_MCU_CMD_EVENT] = 0x54000108,
+ [WFDMA0_ADDR] = 0x24000,
+ [WFDMA0_PCIE1_ADDR] = 0x28000,
+ [WFDMA_EXT_CSR_ADDR] = 0x27000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_ASSERT_STAT_ADDR] = 0x02204b54,
+ [FW_EXCEPT_TYPE_ADDR] = 0x022050dc,
+ [FW_EXCEPT_COUNT_ADDR] = 0x02204ffc,
+ [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
+ [FW_CIRQ_IDX_ADDR] = 0x02204ec4,
+ [FW_CIRQ_LISR_ADDR] = 0x02205010,
+ [FW_TASK_ID_ADDR] = 0x02204fac,
+ [FW_TASK_IDX_ADDR] = 0x02204f4c,
+ [FW_TASK_QID1_ADDR] = 0x02202814,
+ [FW_TASK_QID2_ADDR] = 0x02202984,
+ [FW_TASK_START_ADDR] = 0x022027b8,
+ [FW_TASK_END_ADDR] = 0x022027bc,
+ [FW_TASK_SIZE_ADDR] = 0x022027c4,
+ [FW_LAST_MSG_ID_ADDR] = 0x02204f28,
+ [FW_EINT_INFO_ADDR] = 0x02205194,
+ [FW_SCHED_INFO_ADDR] = 0x022051a4,
+ [SWDEF_BASE_ADDR] = 0x411400,
+ [TXQ_WED_RING_BASE] = 0x24420,
+ [RXQ_WED_RING_BASE] = 0x24520,
+ [RXQ_WED_DATA_RING_BASE] = 0x24540,
};
static const u32 mt7915_offs[] = {
@@ -448,6 +507,14 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
return mt7915_reg_map_l2(dev, addr);
}
+void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
+ size_t len)
+{
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+}
+
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
@@ -472,6 +539,257 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
+ int ret;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->mt76.token_lock);
+
+ ret = wait_event_timeout(dev->mt76.tx_wait,
+ !dev->mt76.wed_token_count, HZ);
+ if (!ret)
+ return -EAGAIN;
+
+ phy = &dev->phy;
+ mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
+
+ return 0;
+}
+
+static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = MT7915_TOKEN_SIZE;
+ spin_unlock_bh(&dev->mt76.token_lock);
+
+ /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
+ * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
+ */
+ phy = &dev->phy;
+ mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
+}
+
+static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+ struct page *page;
+ int i;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(&dev->mt76, i);
+ if (!t || !t->ptr)
+ continue;
+
+ dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+ wed->wlan.rx_size, DMA_FROM_DEVICE);
+ skb_free_frag(t->ptr);
+ t->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, t);
+ }
+
+ if (!wed->rx_buf_ring.rx_page.va)
+ return;
+
+ page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+ __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+ memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
+}
+
+static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt7915_dev *dev;
+ u32 length;
+ int i;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+ sizeof(struct skb_shared_info));
+
+ for (i = 0; i < size; i++) {
+ struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+ dma_addr_t phy_addr;
+ int token;
+ void *ptr;
+
+ ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
+ GFP_KERNEL);
+ if (!ptr)
+ goto unmap;
+
+ phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+ wed->wlan.rx_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+ skb_free_frag(ptr);
+ goto unmap;
+ }
+
+ desc->buf0 = cpu_to_le32(phy_addr);
+ token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+ desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
+ token));
+ desc++;
+ }
+
+ return 0;
+
+unmap:
+ mt7915_mmio_wed_release_rx_buf(wed);
+ return -ENOMEM;
+}
+
+static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
+ struct mtk_wed_wo_rx_stats *stats)
+{
+ int idx = le16_to_cpu(stats->wlan_idx);
+ struct mt7915_dev *dev;
+ struct mt76_wcid *wcid;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ if (idx >= mt7915_wtbl_size(dev))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (wcid) {
+ wcid->stats.rx_bytes += le32_to_cpu(stats->rx_byte_cnt);
+ wcid->stats.rx_packets += le32_to_cpu(stats->rx_pkt_cnt);
+ wcid->stats.rx_errors += le32_to_cpu(stats->rx_err_cnt);
+ wcid->stats.rx_drops += le32_to_cpu(stats->rx_drop_cnt);
+ }
+
+ rcu_read_unlock();
+}
+#endif
+
+int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ bool pci, int *irq)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ int ret;
+
+ if (!wed_enable)
+ return 0;
+
+ if (pci) {
+ struct pci_dev *pci_dev = pdev_ptr;
+
+ wed->wlan.pci_dev = pci_dev;
+ wed->wlan.bus_type = MTK_WED_BUS_PCIE;
+ wed->wlan.base = devm_ioremap(dev->mt76.dev,
+ pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
+ wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) +
+ MT_INT_WED_SOURCE_CSR;
+ wed->wlan.wpdma_mask = pci_resource_start(pci_dev, 0) +
+ MT_INT_WED_MASK_CSR;
+ wed->wlan.wpdma_phys = pci_resource_start(pci_dev, 0) +
+ MT_WFDMA_EXT_CSR_BASE;
+ wed->wlan.wpdma_tx = pci_resource_start(pci_dev, 0) +
+ MT_TXQ_WED_RING_BASE;
+ wed->wlan.wpdma_txfree = pci_resource_start(pci_dev, 0) +
+ MT_RXQ_WED_RING_BASE;
+ wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) +
+ MT_WPDMA_GLO_CFG;
+ wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) +
+ MT_RXQ_WED_DATA_RING_BASE;
+ } else {
+ struct platform_device *plat_dev = pdev_ptr;
+ struct resource *res;
+
+ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ wed->wlan.platform_dev = plat_dev;
+ wed->wlan.bus_type = MTK_WED_BUS_AXI;
+ wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start,
+ resource_size(res));
+ wed->wlan.phy_base = res->start;
+ wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR;
+ wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR;
+ wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE;
+ wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE;
+ wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
+ wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
+ }
+ wed->wlan.nbuf = 4096;
+ wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
+ wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31;
+ wed->wlan.txfree_tbit = is_mt7986(&dev->mt76) ? 2 : 1;
+ wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
+ wed->wlan.wcid_512 = !is_mt7915(&dev->mt76);
+
+ wed->wlan.rx_nbuf = 65536;
+ wed->wlan.rx_npkt = MT7915_WED_RX_TOKEN_SIZE;
+ wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
+ if (is_mt7915(&dev->mt76)) {
+ wed->wlan.rx_tbit[0] = 16;
+ wed->wlan.rx_tbit[1] = 17;
+ } else if (is_mt7986(&dev->mt76)) {
+ wed->wlan.rx_tbit[0] = 22;
+ wed->wlan.rx_tbit[1] = 23;
+ } else {
+ wed->wlan.rx_tbit[0] = 18;
+ wed->wlan.rx_tbit[1] = 19;
+ }
+
+ wed->wlan.init_buf = mt7915_wed_init_buf;
+ wed->wlan.offload_enable = mt7915_mmio_wed_offload_enable;
+ wed->wlan.offload_disable = mt7915_mmio_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt7915_mmio_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt7915_mmio_wed_release_rx_buf;
+ wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
+
+ dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+
+ if (mtk_wed_device_attach(wed))
+ return 0;
+
+ *irq = wed->irq;
+ dev->mt76.dma_dev = wed->dev;
+
+ ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return 1;
+#else
+ return 0;
+#endif
+}
+
static int mt7915_mmio_init(struct mt76_dev *mdev,
void __iomem *mem_base,
u32 device_id)
@@ -536,7 +854,11 @@ void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
mdev->mmio.irqmask |= set;
if (write_reg) {
- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
+ mdev->mmio.irqmask);
+ else
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
}
@@ -560,6 +882,8 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
if (mtk_wed_device_active(wed)) {
mtk_wed_device_irq_set_mask(wed, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
} else {
mt76_wr(dev, MT_INT_MASK_CSR, 0);
@@ -613,10 +937,9 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
u32 val = mt76_rr(dev, MT_MCU_CMD);
mt76_wr(dev, MT_MCU_CMD, val);
- if (val & MT_MCU_CMD_ERROR_MASK) {
- dev->reset_state = val;
- queue_work(dev->mt76.wq, &dev->reset_work);
- wake_up(&dev->reset_wait);
+ if (val & (MT_MCU_CMD_ERROR_MASK | MT_MCU_CMD_WDT_MASK)) {
+ dev->recovery.state = val;
+ mt7915_reset(dev);
}
}
}
@@ -648,7 +971,8 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 1eb11617a625..6351feba6bdf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -68,6 +68,8 @@
#define MT7915_MIN_TWT_DUR 64
#define MT7915_MAX_QUEUE (MT_RXQ_BAND2 + __MT_MCUQ_MAX + 2)
+#define MT7915_WED_RX_TOKEN_SIZE 12288
+
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@@ -114,6 +116,8 @@ struct mt7915_twt_flow {
u8 sched:1;
};
+DECLARE_EWMA(avg_signal, 10, 8)
+
struct mt7915_sta {
struct mt76_wcid wcid; /* must be first */
@@ -123,10 +127,12 @@ struct mt7915_sta {
struct list_head rc_list;
u32 airtime_ac[8];
+ int ack_signal;
+ struct ewma_avg_signal avg_ack_signal;
+
unsigned long changed;
unsigned long jiffies;
unsigned long ampdu_state;
-
struct mt76_connac_sta_key_conf bip;
struct {
@@ -220,6 +226,15 @@ struct mib_stats {
u32 tx_amsdu_cnt;
};
+/* crash-dump */
+struct mt7915_crash_data {
+ guid_t guid;
+ struct timespec64 timestamp;
+
+ u8 *memdump_buf;
+ size_t memdump_buf_len;
+};
+
struct mt7915_hif {
struct list_head list;
@@ -243,7 +258,6 @@ struct mt7915_phy {
u32 rxfilter;
u64 omac_mask;
- u8 band_idx;
u16 noise;
@@ -301,9 +315,26 @@ struct mt7915_dev {
struct work_struct init_work;
struct work_struct rc_work;
+ struct work_struct dump_work;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
- u32 reset_state;
+
+ struct {
+ u32 state;
+ u32 wa_reset_count;
+ u32 wm_reset_count;
+ bool hw_full_reset:1;
+ bool hw_init_done:1;
+ bool restart:1;
+ } recovery;
+
+ /* protects coredump data */
+ struct mutex dump_mutex;
+#ifdef CONFIG_DEV_COREDUMP
+ struct {
+ struct mt7915_crash_data *crash_data;
+ } coredump;
+#endif
struct list_head sta_rc_list;
struct list_head sta_poll_list;
@@ -357,6 +388,7 @@ enum mt7915_rdd_cmd {
RDD_DET_MODE,
RDD_RADAR_EMULATE,
RDD_START_TXQ = 20,
+ RDD_SET_WF_ANT = 30,
RDD_CAC_START = 50,
RDD_CAC_END,
RDD_NORMAL_START,
@@ -442,7 +474,14 @@ s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
+int mt7915_dma_reset(struct mt7915_dev *dev, bool force);
+int mt7915_txbf_init(struct mt7915_dev *dev);
+void mt7915_init_txpower(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt7915_reset(struct mt7915_dev *dev);
+int mt7915_run(struct ieee80211_hw *hw);
int mt7915_mcu_init(struct mt7915_dev *dev);
+int mt7915_mcu_init_firmware(struct mt7915_dev *dev);
int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
struct mt7915_vif *mvif,
struct mt7915_twt_flow *flow,
@@ -463,8 +502,8 @@ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vi
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int enable, u32 changed);
-int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- bool enable);
+int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd);
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -488,6 +527,10 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
+int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower);
+int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, s8 txpower);
int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action);
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
@@ -542,11 +585,17 @@ static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
+void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
+ size_t len);
+
+void mt7915_mac_init(struct mt7915_dev *dev);
u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw);
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
+void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key,
@@ -558,6 +607,7 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
+void mt7915_mac_dump_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
void mt7915_mac_update_stats(struct mt7915_phy *phy);
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
@@ -572,7 +622,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_token_put(struct mt7915_dev *dev);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *info);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7915_stats_work(struct work_struct *work);
@@ -583,6 +633,7 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
void mt7915_update_channel(struct mt76_phy *mphy);
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
+int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev);
int mt7915_init_debugfs(struct mt7915_phy *phy);
void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
@@ -590,5 +641,7 @@ bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
#endif
+int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ bool pci, int *irq);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 728a879c3b00..39132894e8ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -12,9 +12,6 @@
#include "mac.h"
#include "../trace.h"
-static bool wed_enable = false;
-module_param(wed_enable, bool, 0644);
-
static LIST_HEAD(hif_list);
static DEFINE_SPINLOCK(hif_lock);
static u32 hif_idx;
@@ -65,10 +62,17 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
{
+ struct pci_dev *tmp_pdev;
+
hif_idx++;
- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
- !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
- return NULL;
+
+ tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL);
+ if (!tmp_pdev) {
+ tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL);
+ if (!tmp_pdev)
+ return NULL;
+ }
+ pci_dev_put(tmp_pdev);
writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
@@ -95,94 +99,6 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
-{
- struct mt7915_dev *dev;
- struct mt7915_phy *phy;
- int ret;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
- spin_lock_bh(&dev->mt76.token_lock);
- dev->mt76.token_size = wed->wlan.token_start;
- spin_unlock_bh(&dev->mt76.token_lock);
-
- ret = wait_event_timeout(dev->mt76.tx_wait,
- !dev->mt76.wed_token_count, HZ);
- if (!ret)
- return -EAGAIN;
-
- phy = &dev->phy;
- mt76_set(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
-
- phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
- if (phy)
- mt76_set(dev, MT_AGG_ACR4(phy->band_idx),
- MT_AGG_ACR_PPDU_TXS2H);
-
- return 0;
-}
-
-static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
-{
- struct mt7915_dev *dev;
- struct mt7915_phy *phy;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
- spin_lock_bh(&dev->mt76.token_lock);
- dev->mt76.token_size = MT7915_TOKEN_SIZE;
- spin_unlock_bh(&dev->mt76.token_lock);
-
- /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
- * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
- */
- phy = &dev->phy;
- mt76_clear(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
-
- phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
- if (phy)
- mt76_clear(dev, MT_AGG_ACR4(phy->band_idx),
- MT_AGG_ACR_PPDU_TXS2H);
-}
-#endif
-
-static int
-mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
-{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
- int ret;
-
- if (!wed_enable)
- return 0;
-
- wed->wlan.pci_dev = pdev;
- wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
- MT_WFDMA_EXT_CSR_BASE;
- wed->wlan.nbuf = 4096;
- wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
- wed->wlan.init_buf = mt7915_wed_init_buf;
- wed->wlan.offload_enable = mt7915_wed_offload_enable;
- wed->wlan.offload_disable = mt7915_wed_offload_disable;
-
- if (mtk_wed_device_attach(wed) != 0)
- return 0;
-
- *irq = wed->irq;
- dev->mt76.dma_dev = wed->dev;
-
- ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- return 1;
-#else
- return 0;
-#endif
-}
-
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -220,7 +136,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
mt7915_wfsys_reset(dev);
hif2 = mt7915_pci_init_hif2(pdev);
- ret = mt7915_pci_wed_init(dev, pdev, &irq);
+ ret = mt7915_mmio_wed_init(dev, pdev, true, &irq);
if (ret < 0)
goto free_wed_or_irq_vector;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 5920e705835a..aca1b2f1e9e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -24,8 +24,26 @@ enum reg_rev {
WFDMA_EXT_CSR_ADDR,
CBTOP1_PHY_END,
INFRA_MCU_ADDR_END,
- FW_EXCEPTION_ADDR,
+ FW_ASSERT_STAT_ADDR,
+ FW_EXCEPT_TYPE_ADDR,
+ FW_EXCEPT_COUNT_ADDR,
+ FW_CIRQ_COUNT_ADDR,
+ FW_CIRQ_IDX_ADDR,
+ FW_CIRQ_LISR_ADDR,
+ FW_TASK_ID_ADDR,
+ FW_TASK_IDX_ADDR,
+ FW_TASK_QID1_ADDR,
+ FW_TASK_QID2_ADDR,
+ FW_TASK_START_ADDR,
+ FW_TASK_END_ADDR,
+ FW_TASK_SIZE_ADDR,
+ FW_LAST_MSG_ID_ADDR,
+ FW_EINT_INFO_ADDR,
+ FW_SCHED_INFO_ADDR,
SWDEF_BASE_ADDR,
+ TXQ_WED_RING_BASE,
+ RXQ_WED_RING_BASE,
+ RXQ_WED_DATA_RING_BASE,
__MT_REG_MAX,
};
@@ -224,6 +242,14 @@ enum offs_rev {
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+/* WTBLOFF TOP: band 0 (0x820e9000), band 1 (0x820f9000) */
+#define MT_WTBLOFF_TOP_BASE(_band) ((_band) ? 0x820f9000 : 0x820e9000)
+#define MT_WTBLOFF_TOP(_band, ofs) (MT_WTBLOFF_TOP_BASE(_band) + (ofs))
+
+#define MT_WTBLOFF_TOP_RSCR(_band) MT_WTBLOFF_TOP(_band, 0x008)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24)
+
/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
@@ -523,8 +549,22 @@ enum offs_rev {
#define MT_WF_RFCR1_DROP_CFEND BIT(7)
#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+#define MT_WF_RMAC_RSVD0(_band) MT_WF_RMAC(_band, 0x02e0)
+#define MT_WF_RMAC_RSVD0_EIFS_CLR BIT(21)
+
#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_OBSS_BACKOFF GENMASK(15, 0)
+#define MT_WF_RMAC_MIB_ED_OFFSET GENMASK(20, 16)
+
+#define MT_WF_RMAC_MIB_AIRTIME1(_band) MT_WF_RMAC(_band, 0x0384)
+#define MT_WF_RMAC_MIB_NONQOSD_BACKOFF GENMASK(31, 16)
+
+#define MT_WF_RMAC_MIB_AIRTIME3(_band) MT_WF_RMAC(_band, 0x038c)
+#define MT_WF_RMAC_MIB_QOS01_BACKOFF GENMASK(31, 0)
+
+#define MT_WF_RMAC_MIB_AIRTIME4(_band) MT_WF_RMAC(_band, 0x0390)
+#define MT_WF_RMAC_MIB_QOS23_BACKOFF GENMASK(31, 0)
/* WFDMA0 */
#define MT_WFDMA0_BASE __REG(WFDMA0_ADDR)
@@ -539,6 +579,8 @@ enum offs_rev {
#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
+#define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
+
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
@@ -547,9 +589,14 @@ enum offs_rev {
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
+
+#define MT_WFDMA0_EXT0_CFG MT_WFDMA0(0x2b0)
+#define MT_WFDMA0_EXT0_RXWB_KEEP BIT(10)
+
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
+#define MT_WPDMA_GLO_CFG MT_WFDMA0(0x208)
/* WFDMA1 */
#define MT_WFDMA1_BASE 0xd5000
@@ -596,6 +643,7 @@ enum offs_rev {
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
+#define MT_INT_WED_SOURCE_CSR MT_WFDMA_EXT_CSR(0x200)
#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
@@ -642,6 +690,10 @@ enum offs_rev {
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
MT_TXQ_ID(q)* 0x4)
+#define MT_TXQ_WED_RING_BASE __REG(TXQ_WED_RING_BASE)
+#define MT_RXQ_WED_RING_BASE __REG(RXQ_WED_RING_BASE)
+#define MT_RXQ_WED_DATA_RING_BASE __REG(RXQ_WED_DATA_RING_BASE)
+
#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
@@ -660,6 +712,11 @@ enum offs_rev {
#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
+#define MT_INT_WED_RX_DONE_BAND0_MT7916 BIT(18)
+#define MT_INT_WED_RX_DONE_BAND1_MT7916 BIT(19)
+#define MT_INT_WED_RX_DONE_WA_MAIN_MT7916 BIT(1)
+#define MT_INT_WED_RX_DONE_WA_MT7916 BIT(17)
+
#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
@@ -683,6 +740,8 @@ enum offs_rev {
#define MT_INT_TX_DONE_BAND0 BIT(30)
#define MT_INT_TX_DONE_BAND1 BIT(31)
#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
+#define MT_INT_WED_TX_DONE_BAND0 BIT(4)
+#define MT_INT_WED_TX_DONE_BAND1 BIT(5)
#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
MT_INT_TX_MCU(MT_MCUQ_WM) | \
@@ -696,6 +755,10 @@ enum offs_rev {
#define MT_MCU_CMD_NORMAL_STATE BIT(5)
#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+#define MT_MCU_CMD_WA_WDT BIT(31)
+#define MT_MCU_CMD_WM_WDT BIT(30)
+#define MT_MCU_CMD_WDT_MASK GENMASK(31, 30)
+
/* TOP RGU */
#define MT_TOP_RGU_BASE 0x18000000
#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
@@ -938,7 +1001,22 @@ enum offs_rev {
#define MT_ADIE_TYPE_MASK BIT(1)
/* FW MODE SYNC */
-#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR)
+#define MT_FW_ASSERT_STAT __REG(FW_ASSERT_STAT_ADDR)
+#define MT_FW_EXCEPT_TYPE __REG(FW_EXCEPT_TYPE_ADDR)
+#define MT_FW_EXCEPT_COUNT __REG(FW_EXCEPT_COUNT_ADDR)
+#define MT_FW_CIRQ_COUNT __REG(FW_CIRQ_COUNT_ADDR)
+#define MT_FW_CIRQ_IDX __REG(FW_CIRQ_IDX_ADDR)
+#define MT_FW_CIRQ_LISR __REG(FW_CIRQ_LISR_ADDR)
+#define MT_FW_TASK_ID __REG(FW_TASK_ID_ADDR)
+#define MT_FW_TASK_IDX __REG(FW_TASK_IDX_ADDR)
+#define MT_FW_TASK_QID1 __REG(FW_TASK_QID1_ADDR)
+#define MT_FW_TASK_QID2 __REG(FW_TASK_QID2_ADDR)
+#define MT_FW_TASK_START __REG(FW_TASK_START_ADDR)
+#define MT_FW_TASK_END __REG(FW_TASK_END_ADDR)
+#define MT_FW_TASK_SIZE __REG(FW_TASK_SIZE_ADDR)
+#define MT_FW_LAST_MSG_ID __REG(FW_LAST_MSG_ID_ADDR)
+#define MT_FW_EINT_INFO __REG(FW_EINT_INFO_ADDR)
+#define MT_FW_SCHED_INFO __REG(FW_SCHED_INFO_ADDR)
#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR)
@@ -1108,9 +1186,15 @@ enum offs_rev {
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
+#define MT_WF_PHY_TPC_CTRL_STAT(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 16))
+#define MT_WF_PHY_TPC_CTRL_STAT_MT7916(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 20))
+#define MT_WF_PHY_TPC_POWER GENMASK(15, 8)
+
#define MT_MCU_WM_CIRQ_BASE 0x89010000
#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
#define MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x80)
#define MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR MT_MCU_WM_CIRQ(0xc0)
+#define MT_MCU_WM_CIRQ_EINT_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x108)
+#define MT_MCU_WM_CIRQ_EINT_SOFT_ADDR MT_MCU_WM_CIRQ(0x118)
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index c74afa746251..c06c56a0270d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -1172,10 +1172,6 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
mem_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
@@ -1187,6 +1183,18 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
return PTR_ERR(dev);
mdev = &dev->mt76;
+ ret = mt7915_mmio_wed_init(dev, pdev, false, &irq);
+ if (ret < 0)
+ goto free_device;
+
+ if (!ret) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto free_device;
+ }
+ }
+
ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
@@ -1206,9 +1214,10 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
free_irq:
devm_free_irq(mdev->dev, irq, dev);
-
free_device:
- mt76_free_device(&dev->mt76);
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mtk_wed_device_detach(&mdev->mmio.wed);
+ mt76_free_device(mdev);
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index efb9bb8231e2..0d76ae31b376 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -44,14 +44,14 @@ mt7915_tm_set_tx_power(struct mt7915_phy *phy)
int ret;
struct {
u8 format_id;
- u8 dbdc_idx;
+ u8 band_idx;
s8 tx_power;
u8 ant_idx; /* Only 0 is valid */
u8 center_chan;
u8 rsv[3];
} __packed req = {
.format_id = 0xf,
- .dbdc_idx = phy != &dev->phy,
+ .band_idx = phy->mt76->band_idx,
.center_chan = ieee80211_frequency_to_channel(freq),
};
u8 *tx_power = NULL;
@@ -77,7 +77,7 @@ mt7915_tm_set_freq_offset(struct mt7915_phy *phy, bool en, u32 val)
struct mt7915_tm_cmd req = {
.testmode_en = en,
.param_idx = MCU_ATE_SET_FREQ_OFFSET,
- .param.freq.band = phy != &dev->phy,
+ .param.freq.band = phy->mt76->band_idx,
.param.freq.freq_offset = cpu_to_le32(val),
};
@@ -111,7 +111,7 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
.param_idx = MCU_ATE_SET_TRX,
.param.trx.type = type,
.param.trx.enable = en,
- .param.trx.band = phy != &dev->phy,
+ .param.trx.band = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@@ -126,7 +126,7 @@ mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
.testmode_en = 1,
.param_idx = MCU_ATE_CLEAN_TXQUEUE,
.param.clean.wcid = wcid,
- .param.clean.band = phy != &dev->phy,
+ .param.clean.band = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@@ -144,7 +144,7 @@ mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs)
.param.slot.sifs = sifs,
.param.slot.rifs = 2,
.param.slot.eifs = cpu_to_le16(60),
- .param.slot.band = phy != &dev->phy,
+ .param.slot.band = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@@ -198,6 +198,7 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
u8 sig_ext = (mode == MT76_TM_TX_MODE_CCK) ? 0 : 6;
u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
u8 aifsn = TM_MIN_AIFSN;
+ u8 band = phy->mt76->band_idx;
u32 i2t_time, tr2t_time, txv_time;
u16 cw = 0;
@@ -232,14 +233,14 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
sifs = min_t(u32, ipg, TM_MAX_SIFS);
}
done:
- txv_time = mt76_get_field(dev, MT_TMAC_ATCR(phy->band_idx),
+ txv_time = mt76_get_field(dev, MT_TMAC_ATCR(band),
MT_TMAC_ATCR_TXV_TOUT);
txv_time *= 50; /* normal clock time */
i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
- mt76_set(dev, MT_TMAC_TRCR0(phy->band_idx),
+ mt76_set(dev, MT_TMAC_TRCR0(band),
FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
@@ -336,6 +337,7 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
int n_regs = ARRAY_SIZE(reg_backup_list);
struct mt7915_dev *dev = phy->dev;
u32 *b = phy->test.reg_backup;
+ u8 band = phy->mt76->band_idx;
int i;
REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
@@ -358,7 +360,7 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
- mt76_wr(dev, reg_backup_list[i].band[phy->band_idx], b[i]);
+ mt76_wr(dev, reg_backup_list[i].band[band], b[i]);
return;
}
@@ -369,33 +371,33 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
phy->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
- b[i] = mt76_rr(dev, reg_backup_list[i].band[phy->band_idx]);
+ b[i] = mt76_rr(dev, reg_backup_list[i].band[band]);
}
- mt76_clear(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_MM_PROT |
+ mt76_clear(dev, MT_AGG_PCR0(band, 0), MT_AGG_PCR0_MM_PROT |
MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT |
MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT);
- mt76_set(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_PTA_WIN_DIS);
+ mt76_set(dev, MT_AGG_PCR0(band, 0), MT_AGG_PCR0_PTA_WIN_DIS);
- mt76_wr(dev, MT_AGG_PCR0(phy->band_idx, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
+ mt76_wr(dev, MT_AGG_PCR0(band, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
MT_AGG_PCR1_RTS0_LEN_THRES);
- mt76_clear(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_BAR_CNT_LIMIT |
+ mt76_clear(dev, MT_AGG_MRCR(band), MT_AGG_MRCR_BAR_CNT_LIMIT |
MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT);
- mt76_rmw(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_RTS_FAIL_LIMIT |
+ mt76_rmw(dev, MT_AGG_MRCR(band), MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT,
FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) |
FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1));
- mt76_wr(dev, MT_TMAC_TFCR0(phy->band_idx), 0);
- mt76_clear(dev, MT_TMAC_TCR0(phy->band_idx), MT_TMAC_TCR0_TBTT_STOP_CTRL);
+ mt76_wr(dev, MT_TMAC_TFCR0(band), 0);
+ mt76_clear(dev, MT_TMAC_TCR0(band), MT_TMAC_TCR0_TBTT_STOP_CTRL);
/* config rx filter for testmode rx */
- mt76_wr(dev, MT_WF_RFCR(phy->band_idx), 0xcf70a);
- mt76_wr(dev, MT_WF_RFCR1(phy->band_idx), 0);
+ mt76_wr(dev, MT_WF_RFCR(band), 0xcf70a);
+ mt76_wr(dev, MT_WF_RFCR1(band), 0);
}
static void
@@ -432,8 +434,6 @@ mt7915_tm_update_channel(struct mt7915_phy *phy)
static void
mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
{
- static const u8 spe_idx_map[] = {0, 0, 1, 0, 3, 2, 4, 0,
- 9, 8, 6, 10, 16, 12, 18, 0};
struct mt76_testmode_data *td = &phy->mt76->test;
struct mt7915_dev *dev = phy->dev;
struct ieee80211_tx_info *info;
@@ -447,15 +447,10 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
if (en) {
mt7915_tm_update_channel(phy);
- if (td->tx_spe_idx) {
+ if (td->tx_spe_idx)
phy->test.spe_idx = td->tx_spe_idx;
- } else {
- u8 tx_ant = td->tx_antenna_mask;
-
- if (phy != &dev->phy)
- tx_ant >>= dev->chainshift;
- phy->test.spe_idx = spe_idx_map[tx_ant];
- }
+ else
+ phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
}
mt7915_tm_set_tam_arb(phy, en,
@@ -495,7 +490,7 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
mt7915_tm_update_channel(phy);
/* read-clear */
- mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
+ mt76_rr(dev, MT_MIB_SDR3(phy->mt76->band_idx));
mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
}
}
@@ -522,6 +517,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
struct mt76_testmode_data *td = &phy->mt76->test;
u32 func_idx = en ? TX_CONT_START : TX_CONT_STOP;
u8 rate_idx = td->tx_rate_idx, mode;
+ u8 band = phy->mt76->band_idx;
u16 rateval;
struct mt7915_tm_rf_test req = {
.action = 1,
@@ -533,7 +529,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
tx_cont->control_ch = chandef->chan->hw_value;
tx_cont->center_ch = freq1;
tx_cont->tx_ant = td->tx_antenna_mask;
- tx_cont->band = phy != &dev->phy;
+ tx_cont->band = band;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
@@ -565,7 +561,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
}
if (!en) {
- req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy);
+ req.op.rf.param.func_data = cpu_to_le32(band);
goto out;
}
@@ -696,7 +692,9 @@ mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
{
struct mt76_testmode_data *td = &mphy->test;
struct mt7915_phy *phy = mphy->priv;
- u32 changed = 0;
+ struct mt7915_dev *dev = phy->dev;
+ u32 chainmask = mphy->chainmask, changed = 0;
+ bool ext_phy = phy != &dev->phy;
int i;
BUILD_BUG_ON(NUM_TM_CHANGED >= 32);
@@ -705,7 +703,8 @@ mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
td->state == MT76_TM_STATE_OFF)
return 0;
- if (td->tx_antenna_mask & ~mphy->chainmask)
+ chainmask = ext_phy ? chainmask >> dev->chainshift : chainmask;
+ if (td->tx_antenna_mask > chainmask)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
@@ -771,11 +770,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
- cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->mt76->band_idx));
fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
- q = phy->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
+ q = phy->mt76->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index bce76417f95d..29d8883268f6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -85,7 +85,7 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
seq_puts(file, "\nCount: ");
for (i = 0; i < ARRAY_SIZE(bound); i++)
- seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
seq_puts(file, "\n");
seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index dcdb3cf04ac1..542dfd425129 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
+#include <linux/firmware.h>
#include "mt7921.h"
#include "mac.h"
#include "mcu.h"
@@ -25,6 +26,27 @@ static const struct ieee80211_iface_combination if_comb[] = {
.max_interfaces = MT7921_MAX_INTERFACES,
.num_different_channels = 1,
.beacon_int_infra_match = true,
+ },
+};
+
+static const struct ieee80211_iface_limit if_limits_chanctx[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb_chanctx[] = {
+ {
+ .limits = if_limits_chanctx,
+ .n_limits = ARRAY_SIZE(if_limits_chanctx),
+ .max_interfaces = 2,
+ .num_different_channels = 2,
+ .beacon_int_infra_match = false,
}
};
@@ -37,6 +59,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
+ dev->country_ie_env = request->country_ie_env;
mt7921_mutex_acquire(dev);
mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
@@ -65,12 +88,20 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
hw->sta_data_size = sizeof(struct mt7921_sta);
hw->vif_data_size = sizeof(struct mt7921_vif);
- wiphy->iface_combinations = if_comb;
+ if (dev->fw_features & MT7921_FW_CAP_CNM) {
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->iface_combinations = if_comb_chanctx;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_chanctx);
+ } else {
+ wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ }
wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION);
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP);
- wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ wiphy->max_remain_on_channel_duration = 5000;
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
wiphy->max_sched_scan_plan_interval =
@@ -129,6 +160,58 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
+u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
+{
+ struct mt7921_fw_features *features = NULL;
+ const struct mt76_connac2_fw_trailer *hdr;
+ struct mt7921_realease_info *rel_info;
+ const struct firmware *fw;
+ int ret, i, offset = 0;
+ const u8 *data, *end;
+
+ ret = request_firmware(&fw, fw_wm, dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev, "Invalid firmware\n");
+ return -EINVAL;
+ }
+
+ data = fw->data;
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
+
+ for (i = 0; i < hdr->n_region; i++) {
+ const struct mt76_connac2_fw_region *region;
+
+ region = (const void *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ offset += le32_to_cpu(region->len);
+ }
+
+ data += offset + 16;
+ rel_info = (struct mt7921_realease_info *)data;
+ data += sizeof(*rel_info);
+ end = data + le16_to_cpu(rel_info->len);
+
+ while (data < end) {
+ rel_info = (struct mt7921_realease_info *)data;
+ data += sizeof(*rel_info);
+
+ if (rel_info->tag == MT7921_FW_TAG_FEATURE) {
+ features = (struct mt7921_fw_features *)data;
+ break;
+ }
+
+ data += le16_to_cpu(rel_info->len) + rel_info->pad_len;
+ }
+
+ release_firmware(fw);
+
+ return features ? features->data : 0;
+}
+EXPORT_SYMBOL_GPL(mt7921_check_offload_capability);
+
int mt7921_mac_init(struct mt7921_dev *dev)
{
int i;
@@ -278,6 +361,10 @@ int mt7921_register_device(struct mt7921_dev *dev)
INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
INIT_WORK(&dev->init_work, mt7921_init_work);
+ INIT_WORK(&dev->phy.roc_work, mt7921_roc_work);
+ timer_setup(&dev->phy.roc_timer, mt7921_roc_timer, 0);
+ init_waitqueue_head(&dev->phy.roc_wait);
+
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 650ab97ae052..82db3762be33 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -168,14 +168,6 @@ static void
mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
struct mt76_rx_status *status, u8 chfreq)
{
- if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
- !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
- !test_bit(MT76_STATE_ROC, &mphy->state)) {
- status->freq = mphy->chandef.chan->center_freq;
- status->band = mphy->chandef.chan->band;
- return;
- }
-
if (chfreq > 180) {
status->band = NL80211_BAND_6GHZ;
chfreq = (chfreq - 181) * 4 + 1;
@@ -396,6 +388,27 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
+ ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
+ rxv, &mode);
+ if (ret < 0)
+ return ret;
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+ rxd += 6;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ rxv = rxd;
+ /* Monitor mode would use RCPI described in GROUP 5
+ * instead.
+ */
+ v1 = le32_to_cpu(rxv[0]);
+
+ rxd += 12;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
status->chains = mphy->antenna_mask;
status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
@@ -410,17 +423,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->signal = max(status->signal,
status->chain_signal[i]);
}
-
- ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
- rxv, &mode);
- if (ret < 0)
- return ret;
-
- if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
- rxd += 18;
- if ((u8 *)rxd - skb->data >= skb->len)
- return -EINVAL;
- }
}
amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
@@ -682,7 +684,7 @@ bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
EXPORT_SYMBOL_GPL(mt7921_rx_check);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *info)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
@@ -735,7 +737,7 @@ void mt7921_mac_reset_counters(struct mt7921_phy *phy)
}
dev->mt76.phy.survey_time = ktime_get_boottime();
- memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);
+ memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
@@ -856,7 +858,7 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
if (vif->type == NL80211_IFTYPE_AP) {
mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
- true);
+ true, NULL);
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
@@ -974,16 +976,16 @@ void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
mib->tx_amsdu_cnt += val;
}
- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
u32 val2;
val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
- dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr0++] += val >> 16;
- dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
- dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
+ phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
+ phy->mt76->aggr_stats[aggr0++] += val >> 16;
+ phy->mt76->aggr_stats[aggr1++] += val2 & 0xffff;
+ phy->mt76->aggr_stats[aggr1++] += val2 >> 16;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 7e409ac7d9a8..76ac5069638f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -386,6 +386,116 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
+static void mt7921_roc_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = priv;
+
+ mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id);
+}
+
+void mt7921_roc_work(struct work_struct *work)
+{
+ struct mt7921_phy *phy;
+
+ phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy,
+ roc_work);
+
+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return;
+
+ mt7921_mutex_acquire(phy->dev);
+ ieee80211_iterate_active_interfaces(phy->mt76->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_roc_iter, phy);
+ mt7921_mutex_release(phy->dev);
+ ieee80211_remain_on_channel_expired(phy->mt76->hw);
+}
+
+void mt7921_roc_timer(struct timer_list *timer)
+{
+ struct mt7921_phy *phy = from_timer(phy, timer, roc_timer);
+
+ ieee80211_queue_work(phy->mt76->hw, &phy->roc_work);
+}
+
+static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif)
+{
+ int err;
+
+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return 0;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+
+ return err;
+}
+
+static int mt7921_set_roc(struct mt7921_phy *phy,
+ struct mt7921_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum mt7921_roc_req type)
+{
+ int err;
+
+ if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return -EBUSY;
+
+ phy->roc_grant = false;
+
+ err = mt7921_mcu_set_roc(phy, vif, chan, duration, type,
+ ++phy->roc_token_id);
+ if (err < 0) {
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ goto out;
+ }
+
+ if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) {
+ mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ err = -ETIMEDOUT;
+ }
+
+out:
+ return err;
+}
+
+static int mt7921_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ int err;
+
+ mt7921_mutex_acquire(phy->dev);
+ err = mt7921_set_roc(phy, mvif, chan, duration, MT7921_ROC_REQ_ROC);
+ mt7921_mutex_release(phy->dev);
+
+ return err;
+}
+
+static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ int err;
+
+ mt7921_mutex_acquire(phy->dev);
+ err = mt7921_abort_roc(phy, mvif);
+ mt7921_mutex_release(phy->dev);
+
+ return err;
+}
+
static int mt7921_set_channel(struct mt7921_phy *phy)
{
struct mt7921_dev *dev = phy->dev;
@@ -748,7 +858,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
- true);
+ true, mvif->ctx);
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -780,7 +890,8 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
ewma_rssi_init(&mvif->rssi);
if (!sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
- &mvif->sta.wcid, false);
+ &mvif->sta.wcid, false,
+ mvif->ctx);
}
spin_lock_bh(&dev->sta_poll_lock);
@@ -1075,7 +1186,7 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* Tx ampdu stat */
for (i = 0; i < 15; i++)
- data[ei++] = dev->mt76.aggr_stats[i];
+ data[ei++] = phy->mt76->aggr_stats[i];
data[ei++] = phy->mib.ba_miss_cnt;
@@ -1504,7 +1615,13 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
int err;
mt7921_mutex_acquire(dev);
+ err = mt7921_mcu_set_clc(dev, dev->mt76.alpha2,
+ dev->country_ie_env);
+ if (err < 0)
+ goto out;
+
err = mt7921_set_tx_sar_pwr(hw, sar);
+out:
mt7921_mutex_release(dev);
return err;
@@ -1534,7 +1651,7 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7921_mutex_acquire(dev);
err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
- true);
+ true, mvif->ctx);
if (err)
goto out;
@@ -1565,12 +1682,109 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (err)
goto out;
- mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false);
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false,
+ mvif->ctx);
out:
mt7921_mutex_release(dev);
}
+static int
+mt7921_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return 0;
+}
+
+static void
+mt7921_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+}
+
+static void mt7921_ctx_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct ieee80211_chanctx_conf *ctx = priv;
+
+ if (ctx != mvif->ctx)
+ return;
+
+ mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
+}
+
+static void
+mt7921_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+
+ mt7921_mutex_acquire(phy->dev);
+ ieee80211_iterate_active_interfaces(phy->mt76->hw,
+ IEEE80211_IFACE_ITER_ACTIVE,
+ mt7921_ctx_iter, ctx);
+ mt7921_mutex_release(phy->dev);
+}
+
+static int
+mt7921_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mvif->ctx = ctx;
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void
+mt7921_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mvif->ctx = NULL;
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ u16 duration = info->duration ? info->duration :
+ jiffies_to_msecs(HZ);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_set_roc(mvif->phy, mvif, mvif->ctx->def.chan, duration,
+ MT7921_ROC_REQ_JOIN);
+ mt7921_mutex_release(dev);
+}
+
+static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_abort_roc(mvif->phy, mvif);
+ mt7921_mutex_release(dev);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@@ -1621,6 +1835,15 @@ const struct ieee80211_ops mt7921_ops = {
#endif /* CONFIG_PM */
.flush = mt7921_flush,
.set_sar_specs = mt7921_set_sar_specs,
+ .remain_on_channel = mt7921_remain_on_channel,
+ .cancel_remain_on_channel = mt7921_cancel_remain_on_channel,
+ .add_chanctx = mt7921_add_chanctx,
+ .remove_chanctx = mt7921_remove_chanctx,
+ .change_chanctx = mt7921_change_chanctx,
+ .assign_vif_chanctx = mt7921_assign_vif_chanctx,
+ .unassign_vif_chanctx = mt7921_unassign_vif_chanctx,
+ .mgd_prepare_tx = mt7921_mgd_prepare_tx,
+ .mgd_complete_tx = mt7921_mgd_complete_tx,
};
EXPORT_SYMBOL_GPL(mt7921_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 67bf92969a7b..fb9c0f66cb27 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -155,6 +155,29 @@ void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
#endif /* CONFIG_PM */
static void
+mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_roc_grant_tlv *grant;
+ struct mt76_connac2_mcu_rxd *rxd;
+ int duration;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);
+
+ /* should never happen */
+ WARN_ON_ONCE(le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT);
+
+ if (grant->reqtype == MT7921_ROC_REQ_ROC)
+ ieee80211_ready_on_channel(dev->mt76.phy.hw);
+
+ dev->phy.roc_grant = true;
+ wake_up(&dev->phy.roc_wait);
+ duration = le32_to_cpu(grant->max_interval);
+ mod_timer(&dev->phy.roc_timer,
+ round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
+}
+
+static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -200,20 +223,6 @@ mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
static void
-mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb)
-{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_connac_mcu_bss_event *event;
-
- skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
- event = (struct mt76_connac_mcu_bss_event *)skb->data;
- if (event->is_absent)
- ieee80211_stop_queues(mphy->hw);
- else
- ieee80211_wake_queues(mphy->hw);
-}
-
-static void
mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_debug_msg {
@@ -279,9 +288,6 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
case MCU_EVENT_SCAN_DONE:
mt7921_mcu_scan_event(dev, skb);
return;
- case MCU_EVENT_BSS_ABSENCE:
- mt7921_mcu_bss_event(dev, skb);
- break;
case MCU_EVENT_DBG_MSG:
mt7921_mcu_debug_msg_event(dev, skb);
break;
@@ -302,6 +308,24 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
dev_kfree_skb(skb);
}
+static void
+mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev,
+ struct sk_buff *skb)
+{
+ struct mt76_connac2_mcu_rxd *rxd;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+
+ switch (rxd->eid) {
+ case MCU_UNI_EVENT_ROC:
+ mt7921_mcu_uni_roc_event(dev, skb);
+ break;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt76_connac2_mcu_rxd *rxd;
@@ -311,6 +335,11 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
+ mt7921_mcu_uni_rx_unsolicited_event(dev, skb);
+ return;
+ }
+
if (rxd->eid == 0x6) {
mt76_mcu_rx_event(&dev->mt76, skb);
return;
@@ -319,7 +348,6 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
- rxd->eid == MCU_EVENT_BSS_ABSENCE ||
rxd->eid == MCU_EVENT_SCAN_DONE ||
rxd->eid == MCU_EVENT_TX_DONE ||
rxd->eid == MCU_EVENT_DBG_MSG ||
@@ -636,6 +664,103 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
&req_mu, sizeof(req_mu), false);
}
+int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum mt7921_roc_req type, u8 token_id)
+{
+ int center_ch = ieee80211_frequency_to_channel(chan->center_freq);
+ struct mt7921_dev *dev = phy->dev;
+ struct {
+ struct {
+ u8 rsv[4];
+ } __packed hdr;
+ struct roc_acquire_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 bss_idx;
+ u8 tokenid;
+ u8 control_channel;
+ u8 sco;
+ u8 band;
+ u8 bw;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw_from_ap;
+ u8 center_chan_from_ap;
+ u8 center_chan2_from_ap;
+ u8 reqtype;
+ __le32 maxinterval;
+ u8 dbdcband;
+ u8 rsv[3];
+ } __packed roc;
+ } __packed req = {
+ .roc = {
+ .tag = cpu_to_le16(UNI_ROC_ACQUIRE),
+ .len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
+ .tokenid = token_id,
+ .reqtype = type,
+ .maxinterval = cpu_to_le32(duration),
+ .bss_idx = vif->mt76.idx,
+ .control_channel = chan->hw_value,
+ .bw = CMD_CBW_20MHZ,
+ .bw_from_ap = CMD_CBW_20MHZ,
+ .center_chan = center_ch,
+ .center_chan_from_ap = center_ch,
+ .dbdcband = 0xff, /* auto */
+ },
+ };
+
+ if (chan->hw_value < center_ch)
+ req.roc.sco = 1; /* SCA */
+ else if (chan->hw_value > center_ch)
+ req.roc.sco = 3; /* SCB */
+
+ switch (chan->band) {
+ case NL80211_BAND_6GHZ:
+ req.roc.band = 3;
+ break;
+ case NL80211_BAND_5GHZ:
+ req.roc.band = 2;
+ break;
+ default:
+ req.roc.band = 1;
+ break;
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
+ &req, sizeof(req), false);
+}
+
+int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
+ u8 token_id)
+{
+ struct mt7921_dev *dev = phy->dev;
+ struct {
+ struct {
+ u8 rsv[4];
+ } __packed hdr;
+ struct roc_abort_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 bss_idx;
+ u8 tokenid;
+ u8 dbdcband;
+ u8 rsv[5];
+ } __packed abort;
+ } __packed req = {
+ .abort = {
+ .tag = cpu_to_le16(UNI_ROC_ABORT),
+ .len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
+ .tokenid = token_id,
+ .bss_idx = vif->mt76.idx,
+ .dbdcband = 0xff, /* auto */
+ },
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
+ &req, sizeof(req), false);
+}
+
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
{
struct mt7921_dev *dev = phy->dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index eaba114a9c7e..15d6b7fe1c6c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -32,6 +32,9 @@
#define MT7921_MCU_INIT_RETRY_COUNT 10
#define MT7921_WFSYS_INIT_RETRY_COUNT 2
+#define MT7921_FW_TAG_FEATURE 4
+#define MT7921_FW_CAP_CNM BIT(7)
+
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
@@ -53,6 +56,55 @@
#define MT7921_SDIO_HDR_TX_BYTES GENMASK(15, 0)
#define MT7921_SDIO_HDR_PKT_TYPE GENMASK(17, 16)
+#define MCU_UNI_EVENT_ROC 0x27
+
+enum {
+ UNI_ROC_ACQUIRE,
+ UNI_ROC_ABORT,
+ UNI_ROC_NUM
+};
+
+enum mt7921_roc_req {
+ MT7921_ROC_REQ_JOIN,
+ MT7921_ROC_REQ_ROC,
+ MT7921_ROC_REQ_NUM
+};
+
+enum {
+ UNI_EVENT_ROC_GRANT = 0,
+ UNI_EVENT_ROC_TAG_NUM
+};
+
+struct mt7921_realease_info {
+ __le16 len;
+ u8 pad_len;
+ u8 tag;
+} __packed;
+
+struct mt7921_fw_features {
+ u8 segment;
+ u8 data;
+ u8 rsv[14];
+} __packed;
+
+struct mt7921_roc_grant_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 bss_idx;
+ u8 tokenid;
+ u8 status;
+ u8 primarychannel;
+ u8 rfsco;
+ u8 rfband;
+ u8 channelwidth;
+ u8 centerfreqseg1;
+ u8 centerfreqseg2;
+ u8 reqtype;
+ u8 dbdcband;
+ u8 rsv[1];
+ __le32 max_interval;
+} __packed;
+
enum mt7921_sdio_pkt_type {
MT7921_SDIO_TXD,
MT7921_SDIO_DATA,
@@ -119,6 +171,7 @@ struct mt7921_vif {
struct ewma_rssi rssi;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct ieee80211_chanctx_conf *ctx;
};
struct mib_stats {
@@ -171,7 +224,7 @@ struct mt7921_clc {
u8 type;
u8 rsv[8];
u8 data[];
-};
+} __packed;
struct mt7921_phy {
struct mt76_phy *mt76;
@@ -200,6 +253,12 @@ struct mt7921_phy {
#endif
struct mt7921_clc *clc[MT7921_CLC_MAX_NUM];
+
+ struct work_struct roc_work;
+ struct timer_list roc_timer;
+ wait_queue_head_t roc_wait;
+ u8 roc_token_id;
+ bool roc_grant;
};
#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
@@ -236,6 +295,7 @@ struct mt7921_dev {
struct work_struct init_work;
u8 fw_debug;
+ u8 fw_features;
struct mt76_connac_pm pm;
struct mt76_connac_coredump coredump;
@@ -244,6 +304,8 @@ struct mt7921_dev {
struct work_struct ipv6_ns_work;
/* IPv6 addresses for WoWLAN */
struct sk_buff_head ipv6_ns_list;
+
+ enum environment_cap country_ie_env;
};
enum {
@@ -408,7 +470,7 @@ void mt7921_tx_worker(struct mt76_worker *w);
void mt7921_tx_token_put(struct mt7921_dev *dev);
bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *info);
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7921_stats_work(struct work_struct *work);
void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
@@ -425,6 +487,8 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable);
void mt7921_scan_work(struct work_struct *work);
+void mt7921_roc_work(struct work_struct *work);
+void mt7921_roc_timer(struct timer_list *timer);
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
@@ -508,4 +572,10 @@ int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
enum environment_cap env_cap);
+int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum mt7921_roc_req type, u8 token_id);
+int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
+ u8 token_id);
+u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 8a53d8f286db..cb72ded37256 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -13,10 +13,14 @@
#include "../trace.h"
static const struct pci_device_id mt7921_pci_device_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608) },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961),
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
+ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ },
};
@@ -228,7 +232,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_hw_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
@@ -252,9 +257,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.fw_own = mt7921e_mcu_fw_pmctrl,
};
+ struct ieee80211_ops *ops;
struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
+ u8 features;
int ret;
ret = pcim_enable_device(pdev);
@@ -278,8 +285,28 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (mt7921_disable_aspm)
mt76_pci_disable_aspm(pdev);
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops,
- &drv_ops);
+ features = mt7921_check_offload_capability(&pdev->dev, (const char *)
+ id->driver_data);
+ ops = devm_kmemdup(&pdev->dev, &mt7921_ops, sizeof(mt7921_ops),
+ GFP_KERNEL);
+ if (!ops) {
+ ret = -ENOMEM;
+ goto err_free_pci_vec;
+ }
+
+ if (!(features & MT7921_FW_CAP_CNM)) {
+ ops->remain_on_channel = NULL;
+ ops->cancel_remain_on_channel = NULL;
+ ops->add_chanctx = NULL;
+ ops->remove_chanctx = NULL;
+ ops->change_chanctx = NULL;
+ ops->assign_vif_chanctx = NULL;
+ ops->unassign_vif_chanctx = NULL;
+ ops->mgd_prepare_tx = NULL;
+ ops->mgd_complete_tx = NULL;
+ }
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), ops, &drv_ops);
if (!mdev) {
ret = -ENOMEM;
goto err_free_pci_vec;
@@ -288,8 +315,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mdev);
dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->fw_features = features;
dev->hif_ops = &mt7921_pcie_ops;
-
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
@@ -480,6 +507,21 @@ failed:
return err;
}
+static void mt7921_pci_shutdown(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt76_connac_pm *pm = &dev->pm;
+
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+
+ /* chip cleanup before reboot */
+ mt7921_mcu_drv_pmctrl(dev);
+ mt7921_dma_cleanup(dev);
+ mt7921_wfsys_reset(dev);
+}
+
static DEFINE_SIMPLE_DEV_PM_OPS(mt7921_pm_ops, mt7921_pci_suspend, mt7921_pci_resume);
static struct pci_driver mt7921_pci_driver = {
@@ -487,6 +529,7 @@ static struct pci_driver mt7921_pci_driver = {
.id_table = mt7921_pci_device_table,
.probe = mt7921_pci_probe,
.remove = mt7921_pci_remove,
+ .shutdown = mt7921_pci_shutdown,
.driver.pm = pm_sleep_ptr(&mt7921_pm_ops),
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 3b25a06fd946..8ce4252b8ae7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -17,7 +17,8 @@
#include "mcu.h"
static const struct sdio_device_id mt7921s_table[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901),
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ } /* Terminating entry */
};
@@ -89,6 +90,7 @@ static int mt7921s_probe(struct sdio_func *func,
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_SDIO_TXD_SIZE,
+ .drv_flags = MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
@@ -121,18 +123,39 @@ static int mt7921s_probe(struct sdio_func *func,
.fw_own = mt7921s_mcu_fw_pmctrl,
};
+ struct ieee80211_ops *ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
+ u8 features;
int ret;
- mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
- &drv_ops);
+ features = mt7921_check_offload_capability(&func->dev, (const char *)
+ id->driver_data);
+
+ ops = devm_kmemdup(&func->dev, &mt7921_ops, sizeof(mt7921_ops),
+ GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ if (!(features & MT7921_FW_CAP_CNM)) {
+ ops->remain_on_channel = NULL;
+ ops->cancel_remain_on_channel = NULL;
+ ops->add_chanctx = NULL;
+ ops->remove_chanctx = NULL;
+ ops->change_chanctx = NULL;
+ ops->assign_vif_chanctx = NULL;
+ ops->unassign_vif_chanctx = NULL;
+ ops->mgd_prepare_tx = NULL;
+ ops->mgd_complete_tx = NULL;
+ }
+
+ mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->fw_features = features;
dev->hif_ops = &mt7921_sdio_ops;
-
sdio_set_drvdata(func, dev);
ret = mt76s_init(mdev, func, &mt7921s_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 29c0ee330dbe..5321d20dcdcb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -13,7 +13,8 @@
#include "mac.h"
static const struct usb_device_id mt7921u_device_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ },
};
@@ -170,7 +171,8 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_SDIO_TXD_SIZE,
- .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+ .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
@@ -203,13 +205,28 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
struct ieee80211_hw *hw;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
+ u8 features;
int ret;
+ features = mt7921_check_offload_capability(&usb_intf->dev, (const char *)
+ id->driver_info);
ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops),
GFP_KERNEL);
if (!ops)
return -ENOMEM;
+ if (!(features & MT7921_FW_CAP_CNM)) {
+ ops->remain_on_channel = NULL;
+ ops->cancel_remain_on_channel = NULL;
+ ops->add_chanctx = NULL;
+ ops->remove_chanctx = NULL;
+ ops->change_chanctx = NULL;
+ ops->assign_vif_chanctx = NULL;
+ ops->unassign_vif_chanctx = NULL;
+ ops->mgd_prepare_tx = NULL;
+ ops->mgd_complete_tx = NULL;
+ }
+
ops->stop = mt7921u_stop;
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
@@ -217,6 +234,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
return -ENOMEM;
dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->fw_features = features;
dev->hif_ops = &hif_ops;
udev = usb_get_dev(udev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
new file mode 100644
index 000000000000..5c5fc569e6d5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: ISC
+config MT7996E
+ tristate "MediaTek MT7996 (PCIe) support"
+ select MT76_CONNAC_LIB
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7996-based wireless PCIe devices, which
+ support concurrent tri-band operation at 2.4 GHz, 5 GHz and 6 GHz,
+ with IEEE 802.11be 4x4:4SS, 4096-QAM and 320 MHz channels.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/Makefile b/drivers/net/wireless/mediatek/mt76/mt7996/Makefile
new file mode 100644
index 000000000000..bcb9a3c53149
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7996E) += mt7996e.o
+
+mt7996e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
+ debugfs.o mmio.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
new file mode 100644
index 000000000000..2e4a8909b9e8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/relay.h>
+#include "mt7996.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "mac.h"
+
+#define FW_BIN_LOG_MAGIC 0x44d9c99a
+
+/** global debugfs **/
+
+struct hw_queue_map {
+ const char *name;
+ u8 index;
+ u8 pid;
+ u8 qid;
+};
+
+static int
+mt7996_implicit_txbf_set(void *data, u64 val)
+{
+ struct mt7996_dev *dev = data;
+
+ /* Existing connected stations need to reconnect to apply the
+ * new implicit txbf configuration.
+ */
+ dev->ibf = !!val;
+
+ return mt7996_mcu_set_txbf(dev, BF_HW_EN_UPDATE);
+}
+
+static int
+mt7996_implicit_txbf_get(void *data, u64 *val)
+{
+ struct mt7996_dev *dev = data;
+
+ *val = dev->ibf;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7996_implicit_txbf_get,
+ mt7996_implicit_txbf_set, "%lld\n");
+
+/* test knob of system error recovery */
+static ssize_t
+mt7996_fw_ser_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7996_phy *phy = file->private_data;
+ struct mt7996_dev *dev = phy->dev;
+ u8 band_idx = phy->mt76->band_idx;
+ char buf[16];
+ int ret = 0;
+ u16 val;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ if (kstrtou16(buf, 0, &val))
+ return -EINVAL;
+
+ switch (val) {
+ case SER_SET_RECOVER_L1:
+ case SER_SET_RECOVER_L2:
+ case SER_SET_RECOVER_L3_RX_ABORT:
+ case SER_SET_RECOVER_L3_TX_ABORT:
+ case SER_SET_RECOVER_L3_TX_DISABLE:
+ case SER_SET_RECOVER_L3_BF:
+ ret = mt7996_mcu_set_ser(dev, SER_ENABLE, BIT(val), band_idx);
+ if (ret)
+ return ret;
+
+ ret = mt7996_mcu_set_ser(dev, SER_RECOVER, val, band_idx);
+ break;
+ default:
+ break;
+ }
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations mt7996_fw_ser_ops = {
+ .write = mt7996_fw_ser_set,
+ /* TODO: ser read */
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static int
+mt7996_radar_trigger(void *data, u64 val)
+{
+ struct mt7996_dev *dev = data;
+
+ if (val > MT_RX_SEL2)
+ return -EINVAL;
+
+ return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
+ val, 0, 0);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
+ mt7996_radar_trigger, "%lld\n");
+
+static int
+mt7996_rdd_monitor(struct seq_file *s, void *data)
+{
+ struct mt7996_dev *dev = dev_get_drvdata(s->private);
+ struct cfg80211_chan_def *chandef = &dev->rdd2_chandef;
+ const char *bw;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!cfg80211_chandef_valid(chandef)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!dev->rdd2_phy) {
+ seq_puts(s, "not running\n");
+ goto out;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = "40";
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = "80";
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ bw = "160";
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ bw = "80P80";
+ break;
+ default:
+ bw = "20";
+ break;
+ }
+
+ seq_printf(s, "channel %d (%d MHz) width %s MHz center1: %d MHz\n",
+ chandef->chan->hw_value, chandef->chan->center_freq,
+ bw, chandef->center_freq1);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7996_fw_debug_wm_set(void *data, u64 val)
+{
+ struct mt7996_dev *dev = data;
+ enum {
+ DEBUG_TXCMD = 62,
+ DEBUG_CMD_RPT_TX,
+ DEBUG_CMD_RPT_TRIG,
+ DEBUG_SPL,
+ DEBUG_RPT_RX,
+ DEBUG_RPT_RA = 68,
+ } debug;
+ bool tx, rx, en;
+ int ret;
+
+ dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
+
+ if (dev->fw_debug_bin)
+ val = MCU_FW_LOG_RELAY;
+ else
+ val = dev->fw_debug_wm;
+
+ tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1));
+ rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2));
+ en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0));
+
+ ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
+ if (ret)
+ return ret;
+
+ for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RA; debug++) {
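+ /* 67 has no entry in the debug module enum above, skip it */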
+ if (debug == 67)
+ continue;
+
+ if (debug == DEBUG_RPT_RX)
+ val = en && rx;
+ else
+ val = en && tx;
+
+ ret = mt7996_mcu_fw_dbg_ctrl(dev, debug, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+mt7996_fw_debug_wm_get(void *data, u64 *val)
+{
+ struct mt7996_dev *dev = data;
+
+ *val = dev->fw_debug_wm;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wm, mt7996_fw_debug_wm_get,
+ mt7996_fw_debug_wm_set, "%lld\n");
+
+static int
+mt7996_fw_debug_wa_set(void *data, u64 val)
+{
+ struct mt7996_dev *dev = data;
+ int ret;
+
+ dev->fw_debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
+
+ ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw_debug_wa);
+ if (ret)
+ return ret;
+
+ return mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_PDMA_RX,
+ !!dev->fw_debug_wa, 0);
+}
+
+static int
+mt7996_fw_debug_wa_get(void *data, u64 *val)
+{
+ struct mt7996_dev *dev = data;
+
+ *val = dev->fw_debug_wa;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7996_fw_debug_wa_get,
+ mt7996_fw_debug_wa_set, "%lld\n");
+
+static struct dentry *
+create_buf_file_cb(const char *filename, struct dentry *parent, umode_t mode,
+ struct rchan_buf *buf, int *is_global)
+{
+ struct dentry *f;
+
+ f = debugfs_create_file("fwlog_data", mode, parent, buf,
+ &relay_file_operations);
+ if (IS_ERR(f))
+ return NULL;
+
+ *is_global = 1;
+
+ return f;
+}
+
+static int
+remove_buf_file_cb(struct dentry *f)
+{
+ debugfs_remove(f);
+
+ return 0;
+}
+
+static int
+mt7996_fw_debug_bin_set(void *data, u64 val)
+{
+ static struct rchan_callbacks relay_cb = {
+ .create_buf_file = create_buf_file_cb,
+ .remove_buf_file = remove_buf_file_cb,
+ };
+ struct mt7996_dev *dev = data;
+
+ if (!dev->relay_fwlog)
+ dev->relay_fwlog = relay_open("fwlog_data", dev->debugfs_dir,
+ 1500, 512, &relay_cb, NULL);
+ if (!dev->relay_fwlog)
+ return -ENOMEM;
+
+ dev->fw_debug_bin = val;
+
+ relay_reset(dev->relay_fwlog);
+
+ return mt7996_fw_debug_wm_set(dev, dev->fw_debug_wm);
+}
+
+static int
+mt7996_fw_debug_bin_get(void *data, u64 *val)
+{
+ struct mt7996_dev *dev = data;
+
+ *val = dev->fw_debug_bin;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_bin, mt7996_fw_debug_bin_get,
+ mt7996_fw_debug_bin_set, "%lld\n");
+
+static int
+mt7996_fw_util_wa_show(struct seq_file *file, void *data)
+{
+ struct mt7996_dev *dev = file->private;
+
+ if (dev->fw_debug_wa)
+ return mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY),
+ MCU_WA_PARAM_CPU_UTIL, 0, 0);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7996_fw_util_wa);
+
+static void
+mt7996_ampdu_stat_read_phy(struct mt7996_phy *phy, struct seq_file *file)
+{
+ struct mt7996_dev *dev = phy->dev;
+ int bound[15], range[8], i;
+ u8 band_idx = phy->mt76->band_idx;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < ARRAY_SIZE(range); i++)
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(band_idx, i));
+
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 2], i % 2) + 1;
+
+ seq_printf(file, "\nPhy %s, Phy band %d\n",
+ wiphy_name(phy->mt76->hw->wiphy), band_idx);
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i] + 1, bound[i + 1]);
+
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
+}
+
+static void
+mt7996_txbf_stat_read_phy(struct mt7996_phy *phy, struct seq_file *s)
+{
+ static const char * const bw[] = {
+ "BW20", "BW40", "BW80", "BW160"
+ };
+ struct mib_stats *mib = &phy->mib;
+
+ /* Tx Beamformer monitor */
+ seq_puts(s, "\nTx Beamformer applied PPDU counts: ");
+
+ seq_printf(s, "iBF: %d, eBF: %d\n",
+ mib->tx_bf_ibf_ppdu_cnt,
+ mib->tx_bf_ebf_ppdu_cnt);
+
+ /* Tx Beamformer Rx feedback monitor */
+ seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
+
+ seq_printf(s, "All: %d, HE: %d, VHT: %d, HT: %d, ",
+ mib->tx_bf_rx_fb_all_cnt,
+ mib->tx_bf_rx_fb_he_cnt,
+ mib->tx_bf_rx_fb_vht_cnt,
+ mib->tx_bf_rx_fb_ht_cnt);
+
+ seq_printf(s, "%s, NC: %d, NR: %d\n",
+ bw[mib->tx_bf_rx_fb_bw],
+ mib->tx_bf_rx_fb_nc_cnt,
+ mib->tx_bf_rx_fb_nr_cnt);
+
+ /* Tx Beamformee Rx NDPA & Tx feedback report */
+ seq_printf(s, "Tx Beamformee successful feedback frames: %d\n",
+ mib->tx_bf_fb_cpl_cnt);
+ seq_printf(s, "Tx Beamformee feedback triggered counts: %d\n",
+ mib->tx_bf_fb_trig_cnt);
+
+ /* Tx SU & MU counters */
+ seq_printf(s, "Tx multi-user Beamforming counts: %d\n",
+ mib->tx_mu_bf_cnt);
+ seq_printf(s, "Tx multi-user MPDU counts: %d\n", mib->tx_mu_mpdu_cnt);
+ seq_printf(s, "Tx multi-user successful MPDU counts: %d\n",
+ mib->tx_mu_acked_mpdu_cnt);
+ seq_printf(s, "Tx single-user successful MPDU counts: %d\n",
+ mib->tx_su_acked_mpdu_cnt);
+
+ seq_puts(s, "\n");
+}
+
+static int
+mt7996_tx_stats_show(struct seq_file *file, void *data)
+{
+ struct mt7996_phy *phy = file->private;
+ struct mt7996_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ int i;
+ u32 attempts, success, per;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7996_mac_update_stats(phy);
+ mt7996_ampdu_stat_read_phy(phy, file);
+
+ attempts = mib->tx_mpdu_attempts_cnt;
+ success = mib->tx_mpdu_success_cnt;
+ per = attempts ? 100 - success * 100 / attempts : 100;
+ seq_printf(file, "Tx attempts: %8u (MPDUs)\n", attempts);
+ seq_printf(file, "Tx success: %8u (MPDUs)\n", success);
+ seq_printf(file, "Tx PER: %u%%\n", per);
+
+ mt7996_txbf_stat_read_phy(phy, file);
+
+ /* Tx amsdu info */
+ seq_puts(file, "Tx MSDU statistics:\n");
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ",
+ i + 1, mib->tx_amsdu[i]);
+ if (mib->tx_amsdu_cnt)
+ seq_printf(file, "(%3d%%)\n",
+ mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt);
+ else
+ seq_puts(file, "\n");
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7996_tx_stats);
+
+static void
+mt7996_hw_queue_read(struct seq_file *s, u32 size,
+ const struct hw_queue_map *map)
+{
+ struct mt7996_phy *phy = s->private;
+ struct mt7996_dev *dev = phy->dev;
+ u32 i, val;
+
+ val = mt76_rr(dev, MT_FL_Q_EMPTY);
+ for (i = 0; i < size; i++) {
+ u32 ctrl, head, tail, queued;
+
+ if (val & BIT(map[i].index))
+ continue;
+
+ ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
+
+ head = mt76_get_field(dev, MT_FL_Q2_CTRL,
+ GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
+ GENMASK(27, 16));
+ queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
+
+ seq_printf(s, "\t%s: ", map[i].name);
+ seq_printf(s, "queued:0x%03x head:0x%03x tail:0x%03x\n",
+ queued, head, tail);
+ }
+}
+
+static void
+mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct seq_file *s = data;
+ u8 ac;
+
+ for (ac = 0; ac < 4; ac++) {
+ u32 qlen, ctrl, val;
+ u32 idx = msta->wcid.idx >> 5;
+ u8 offs = msta->wcid.idx & GENMASK(4, 0);
+
+ ctrl = BIT(31) | BIT(11) | (ac << 24);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+
+ if (val & BIT(offs))
+ continue;
+
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
+ sta->addr, msta->wcid.idx,
+ msta->vif->mt76.wmm_idx, ac, qlen);
+ }
+}
+
+static int
+mt7996_hw_queues_show(struct seq_file *file, void *data)
+{
+ struct mt7996_phy *phy = file->private;
+ struct mt7996_dev *dev = phy->dev;
+ static const struct hw_queue_map ple_queue_map[] = {
+ { "CPU_Q0", 0, 1, MT_CTX0 },
+ { "CPU_Q1", 1, 1, MT_CTX0 + 1 },
+ { "CPU_Q2", 2, 1, MT_CTX0 + 2 },
+ { "CPU_Q3", 3, 1, MT_CTX0 + 3 },
+ { "ALTX_Q0", 8, 2, MT_LMAC_ALTX0 },
+ { "BMC_Q0", 9, 2, MT_LMAC_BMC0 },
+ { "BCN_Q0", 10, 2, MT_LMAC_BCN0 },
+ { "PSMP_Q0", 11, 2, MT_LMAC_PSMP0 },
+ { "ALTX_Q1", 12, 2, MT_LMAC_ALTX0 + 4 },
+ { "BMC_Q1", 13, 2, MT_LMAC_BMC0 + 4 },
+ { "BCN_Q1", 14, 2, MT_LMAC_BCN0 + 4 },
+ { "PSMP_Q1", 15, 2, MT_LMAC_PSMP0 + 4 },
+ };
+ static const struct hw_queue_map pse_queue_map[] = {
+ { "CPU Q0", 0, 1, MT_CTX0 },
+ { "CPU Q1", 1, 1, MT_CTX0 + 1 },
+ { "CPU Q2", 2, 1, MT_CTX0 + 2 },
+ { "CPU Q3", 3, 1, MT_CTX0 + 3 },
+ { "HIF_Q0", 8, 0, MT_HIF0 },
+ { "HIF_Q1", 9, 0, MT_HIF0 + 1 },
+ { "HIF_Q2", 10, 0, MT_HIF0 + 2 },
+ { "HIF_Q3", 11, 0, MT_HIF0 + 3 },
+ { "HIF_Q4", 12, 0, MT_HIF0 + 4 },
+ { "HIF_Q5", 13, 0, MT_HIF0 + 5 },
+ { "LMAC_Q", 16, 2, 0 },
+ { "MDP_TXQ", 17, 2, 1 },
+ { "MDP_RXQ", 18, 2, 2 },
+ { "SEC_TXQ", 19, 2, 3 },
+ { "SEC_RXQ", 20, 2, 4 },
+ };
+ u32 val, head, tail;
+
+ /* ple queue */
+ val = mt76_rr(dev, MT_PLE_FREEPG_CNT);
+ head = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(27, 16));
+ seq_puts(file, "PLE page info:\n");
+ seq_printf(file,
+ "\tTotal free page: 0x%08x head: 0x%03x tail: 0x%03x\n",
+ val, head, tail);
+
+ val = mt76_rr(dev, MT_PLE_PG_HIF_GROUP);
+ head = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(27, 16));
+ seq_printf(file, "\tHIF free page: 0x%03x res: 0x%03x used: 0x%03x\n",
+ val, head, tail);
+
+ seq_puts(file, "PLE non-empty queue info:\n");
+ mt7996_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
+ &ple_queue_map[0]);
+
+ /* iterate per-sta ple queue */
+ ieee80211_iterate_stations_atomic(phy->mt76->hw,
+ mt7996_sta_hw_queue_read, file);
+ /* pse queue */
+ seq_puts(file, "PSE non-empty queue info:\n");
+ mt7996_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
+ &pse_queue_map[0]);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7996_hw_queues);
+
+static int
+mt7996_xmit_queues_show(struct seq_file *file, void *data)
+{
+ struct mt7996_phy *phy = file->private;
+ struct mt7996_dev *dev = phy->dev;
+ struct {
+ struct mt76_queue *q;
+ char *queue;
+ } queue_map[] = {
+ { phy->mt76->q_tx[MT_TXQ_BE], " MAIN" },
+ { dev->mt76.q_mcu[MT_MCUQ_WM], " MCUWM" },
+ { dev->mt76.q_mcu[MT_MCUQ_WA], " MCUWA" },
+ { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWDL" },
+ };
+ int i;
+
+ seq_puts(file, " queue | hw-queued | head | tail |\n");
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_queue *q = queue_map[i].q;
+
+ if (!q)
+ continue;
+
+ seq_printf(file, " %s | %9d | %9d | %9d |\n",
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7996_xmit_queues);
+
+static int
+mt7996_twt_stats(struct seq_file *s, void *data)
+{
+ struct mt7996_dev *dev = dev_get_drvdata(s->private);
+ struct mt7996_twt_flow *iter;
+
+ rcu_read_lock();
+
+ seq_puts(s, " wcid | id | flags | exp | mantissa");
+ seq_puts(s, " | duration | tsf |\n");
+ list_for_each_entry_rcu(iter, &dev->twt_list, list)
+ seq_printf(s,
+ "%9d | %8d | %5c%c%c%c | %8d | %8d | %8d | %14lld |\n",
+ iter->wcid, iter->id,
+ iter->sched ? 's' : 'u',
+ iter->protection ? 'p' : '-',
+ iter->trigger ? 't' : '-',
+ iter->flowtype ? '-' : 'a',
+ iter->exp, iter->mantissa,
+ iter->duration, iter->tsf);
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* The index of RF registers uses the generic regidx, combined with two parts:
+ * WF selection [31:24] and offset [23:0].
+ */
+static int
+mt7996_rf_regval_get(void *data, u64 *val)
+{
+ struct mt7996_dev *dev = data;
+ u32 regval;
+ int ret;
+
+ ret = mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &regval, false);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+static int
+mt7996_rf_regval_set(void *data, u64 val)
+{
+ struct mt7996_dev *dev = data;
+
+ return mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7996_rf_regval_get,
+ mt7996_rf_regval_set, "0x%08llx\n");
+
+int mt7996_init_debugfs(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs_fops(phy->mt76, NULL);
+ if (!dir)
+ return -ENOMEM;
+ debugfs_create_file("hw-queues", 0400, dir, phy,
+ &mt7996_hw_queues_fops);
+ debugfs_create_file("xmit-queues", 0400, dir, phy,
+ &mt7996_xmit_queues_fops);
+ debugfs_create_file("tx_stats", 0400, dir, phy, &mt7996_tx_stats_fops);
+ debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
+ debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
+ debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
+ /* TODO: wm fw cpu utilization */
+ debugfs_create_file("fw_util_wa", 0400, dir, dev,
+ &mt7996_fw_util_wa_fops);
+ debugfs_create_file("implicit_txbf", 0600, dir, dev,
+ &fops_implicit_txbf);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
+ mt7996_twt_stats);
+ debugfs_create_file("fw_ser", 0600, dir, phy, &mt7996_fw_ser_ops);
+ debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
+
+ if (phy->mt76->cap.has_5ghz) {
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir,
+ &dev->hw_pattern);
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7996_rdd_monitor);
+ }
+
+ if (phy == &dev->phy)
+ dev->debugfs_dir = dir;
+
+ return 0;
+}
+
+static void
+mt7996_debugfs_write_fwlog(struct mt7996_dev *dev, const void *hdr, int hdrlen,
+ const void *data, int len)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ void *dest;
+
+ spin_lock_irqsave(&lock, flags);
+ dest = relay_reserve(dev->relay_fwlog, hdrlen + len + 4);
+ if (dest) {
+ *(u32 *)dest = hdrlen + len;
+ dest += 4;
+
+ if (hdrlen) {
+ memcpy(dest, hdr, hdrlen);
+ dest += hdrlen;
+ }
+
+ memcpy(dest, data, len);
+ relay_flush(dev->relay_fwlog);
+ }
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+void mt7996_debugfs_rx_fw_monitor(struct mt7996_dev *dev, const void *data, int len)
+{
+ struct {
+ __le32 magic;
+ u8 version;
+ u8 _rsv;
+ __le16 serial_id;
+ __le32 timestamp;
+ __le16 msg_type;
+ __le16 len;
+ } hdr = {
+ .version = 0x1,
+ .magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
+ .msg_type = cpu_to_le16(PKT_TYPE_RX_FW_MONITOR),
+ };
+
+ if (!dev->relay_fwlog)
+ return;
+
+ hdr.serial_id = cpu_to_le16(dev->fw_debug_seq++);
+ hdr.timestamp = cpu_to_le32(mt76_rr(dev, MT_LPON_FRCR(0)));
+ hdr.len = *(__le16 *)data;
+ mt7996_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
+}
+
+bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len)
+{
+ if (get_unaligned_le32(data) != FW_BIN_LOG_MAGIC)
+ return false;
+
+ if (dev->relay_fwlog)
+ mt7996_debugfs_write_fwlog(dev, NULL, 0, data, len);
+
+ return true;
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+/** per-station debugfs **/
+
+static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define SHORT_PREAMBLE 0
+#define LONG_PREAMBLE 1
+ struct ieee80211_sta *sta = file->private_data;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = msta->vif->phy->dev;
+ struct ra_rate phy = {};
+ char buf[100];
+ int ret;
+ u16 gi, ltf;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9
+ * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3
+ * nss - vht: 1~4, he: 1~4, others: ignore
+ * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2
+ * gi - (ht/vht) lgi: 0, sgi: 1; (he) 0.8us: 0, 1.6us: 1, 3.2us: 2
+ * preamble - short: 1, long: 0
+ * ldpc - off: 0, on: 1
+ * stbc - off: 0, on: 1
+ * ltf - 1xltf: 0, 2xltf: 1, 4xltf: 2
+ */
+ if (sscanf(buf, "%hhu %hhu %hhu %hhu %hu %hhu %hhu %hhu %hhu %hu",
+ &phy.mode, &phy.bw, &phy.mcs, &phy.nss, &gi,
+ &phy.preamble, &phy.stbc, &phy.ldpc, &phy.spe, &ltf) != 10) {
+ dev_warn(dev->mt76.dev,
+ "format: Mode BW MCS NSS GI Preamble STBC LDPC SPE ltf\n");
+ goto out;
+ }
+
+ phy.wlan_idx = cpu_to_le16(msta->wcid.idx);
+ phy.gi = cpu_to_le16(gi);
+ phy.ltf = cpu_to_le16(ltf);
+ phy.ldpc = phy.ldpc ? 7 : 0;
+ phy.preamble = phy.preamble ? SHORT_PREAMBLE : LONG_PREAMBLE;
+
+ ret = mt7996_mcu_set_fixed_rate_ctrl(dev, &phy, 0);
+ if (ret)
+ return -EFAULT;
+
+out:
+ return count;
+}
+
+static const struct file_operations fops_fixed_rate = {
+ .write = mt7996_sta_fixed_rate_set,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+mt7996_queues_show(struct seq_file *s, void *data)
+{
+ struct ieee80211_sta *sta = s->private;
+
+ mt7996_sta_hw_queue_read(s, sta);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7996_queues);
+
+void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
+ debugfs_create_file("hw-queues", 0400, dir, sta, &mt7996_queues_fops);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
new file mode 100644
index 000000000000..c09fe4274935
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include "mt7996.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int mt7996_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7996_dev *dev;
+
+ dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);
+
+ mt76_connac_tx_cleanup(&dev->mt76);
+ if (napi_complete_done(napi, 0))
+ mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);
+
+ return 0;
+}
+
+static void mt7996_dma_config(struct mt7996_dev *dev)
+{
+#define Q_CONFIG(q, wfdma, int, id) do { \
+ if (wfdma) \
+ dev->q_wfdma_mask |= (1 << (q)); \
+ dev->q_int_mask[(q)] = int; \
+ dev->q_id[(q)] = id; \
+} while (0)
+
+#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
+#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
+#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
+
+ /* rx queue */
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
+
+ /* band0/band1 */
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);
+
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
+ RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
+
+ /* data tx queue */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+
+ /* mcu tx queue */
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
+}
+
+static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
+{
+#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ /* prefetch SRAM wrapping boundary for tx/rx ring. */
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x2));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x20, 0x2));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0xc0, 0x2));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0xe0, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x120, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x140, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x160, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x180, 0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x1a0, 0x10));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x2a0, 0x10));
+
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
+}
+
+void mt7996_dma_prefetch(struct mt7996_dev *dev)
+{
+ __mt7996_dma_prefetch(dev, 0);
+ if (dev->hif2)
+ __mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
+}
+
+static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
+{
+ u32 hif1_ofs = 0;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ if (reset) {
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+ }
+ }
+
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+}
+
+static int mt7996_dma_enable(struct mt7996_dev *dev)
+{
+ u32 hif1_ofs = 0;
+ u32 irq_mask;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset dma idx */
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+
+ /* configure delay interrupt off */
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
+
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
+ }
+
+ /* configure prefetch settings */
+ mt7996_dma_prefetch(dev);
+
+ /* hif wait WFDMA idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ if (dev->hif2)
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* set WFDMA Tx/Rx */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ /* GLO_CFG_EXT0 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
+ WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
+ WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);
+
+ /* GLO_CFG_EXT1 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
+ WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
+
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ /* GLO_CFG_EXT0 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
+ WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);
+
+ /* GLO_CFG_EXT1 */
+ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
+
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+ }
+
+ if (dev->hif2) {
+ /* fix hardware limitation: pcie1's rx ring3 is not available,
+ * so redirect pcie0 rx ring3 interrupt to pcie1
+ */
+ mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
+ MT_WFDMA0_RX_INT_SEL_RING3);
+
+ /* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
+ }
+
+ /* enable interrupts for TX/RX rings */
+ irq_mask = MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU |
+ MT_INT_MCU_CMD;
+
+ if (!dev->mphy.band_idx)
+ irq_mask |= MT_INT_BAND0_RX_DONE;
+
+ if (dev->dbdc_support)
+ irq_mask |= MT_INT_BAND1_RX_DONE;
+
+ if (dev->tbtc_support)
+ irq_mask |= MT_INT_BAND2_RX_DONE;
+
+ mt7996_irq_enable(dev, irq_mask);
+
+ return 0;
+}
+
+int mt7996_dma_init(struct mt7996_dev *dev)
+{
+ u32 hif1_ofs = 0;
+ int ret;
+
+ mt7996_dma_config(dev);
+
+ mt76_dma_attach(&dev->mt76);
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
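+ /* fully reset the WFDMA engine before setting up the rings */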
+ mt7996_dma_disable(dev, true);
+
+ /* init tx queue */
+ ret = mt76_connac_init_tx_queues(dev->phy.mt76,
+ MT_TXQ_ID(dev->mphy.band_idx),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0), 0);
+ if (ret)
+ return ret;
+
+ /* command to WM */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
+ MT_MCUQ_ID(MT_MCUQ_WM),
+ MT7996_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WM));
+ if (ret)
+ return ret;
+
+ /* command to WA */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7996_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
+ if (ret)
+ return ret;
+
+ /* firmware download */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
+ MT_MCUQ_ID(MT_MCUQ_FWDL),
+ MT7996_TX_FWDL_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
+ if (ret)
+ return ret;
+
+ /* event from WM */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+ MT_RXQ_ID(MT_RXQ_MCU),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU));
+ if (ret)
+ return ret;
+
+ /* event from WA */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ MT_RXQ_ID(MT_RXQ_MCU_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
+ if (ret)
+ return ret;
+
+ /* rx data queue for band0 and band1 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7996_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN));
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band0 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+
+ if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
+ /* rx data queue for band2 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
+ MT_RXQ_ID(MT_RXQ_BAND2),
+ MT7996_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band2;
+ * this uses pcie0's rx ring3, but its interrupt is redirected to pcie1
+ */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
+ MT_RXQ_ID(MT_RXQ_BAND2_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
+ if (ret)
+ return ret;
+ }
+
+ ret = mt76_init_queues(dev, mt76_dma_rx_poll);
+ if (ret < 0)
+ return ret;
+
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7996_poll_tx);
+ napi_enable(&dev->mt76.tx_napi);
+
+ mt7996_dma_enable(dev);
+
+ return 0;
+}
+
+void mt7996_dma_cleanup(struct mt7996_dev *dev)
+{
+ mt7996_dma_disable(dev, true);
+
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
new file mode 100644
index 000000000000..b9f62bedbc48
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/firmware.h>
+#include "mt7996.h"
+#include "eeprom.h"
+
+static int mt7996_check_eeprom(struct mt7996_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u16 val = get_unaligned_le16(eeprom);
+
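+ /* the first eeprom word (MT_EE_CHIP_ID) holds the chip id */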
+ switch (val) {
+ case 0x7990:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static char *mt7996_eeprom_name(struct mt7996_dev *dev)
+{
+ /* reserved for future variants */
+ return MT7996_EEPROM_DEFAULT;
+}
+
+static int
+mt7996_eeprom_load_default(struct mt7996_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ const struct firmware *fw = NULL;
+ int ret;
+
+ ret = request_firmware(&fw, mt7996_eeprom_name(dev), dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data) {
+ dev_err(dev->mt76.dev, "Invalid default bin\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(eeprom, fw->data, MT7996_EEPROM_SIZE);
+ dev->flash_mode = true;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int mt7996_eeprom_load(struct mt7996_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7996_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
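+ /* a non-zero return means the eeprom content was already provided
+ * (e.g. via flash/DT); otherwise read it from efuse block by block
+ */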
+ if (ret) {
+ dev->flash_mode = true;
+ } else {
+ u8 free_block_num;
+ u32 block_num, i;
+
+ /* TODO: check free block event */
+ mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
+ /* efuse info not enough */
+ if (free_block_num >= 59)
+ return -EINVAL;
+
+ /* read eeprom data from efuse */
+ block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, MT7996_EEPROM_BLOCK_SIZE);
+ for (i = 0; i < block_num; i++)
+ mt7996_mcu_get_eeprom(dev, i * MT7996_EEPROM_BLOCK_SIZE);
+ }
+
+ return mt7996_check_eeprom(dev);
+}
+
+static int mt7996_eeprom_parse_band_config(struct mt7996_phy *phy)
+{
+ u8 *eeprom = phy->dev->mt76.eeprom.data;
+ u32 val = eeprom[MT_EE_WIFI_CONF];
+ int ret = 0;
+
+ switch (phy->mt76->band_idx) {
+ case MT_BAND1:
+ val = FIELD_GET(MT_EE_WIFI_CONF1_BAND_SEL, val);
+ break;
+ case MT_BAND2:
+ val = eeprom[MT_EE_WIFI_CONF + 1];
+ val = FIELD_GET(MT_EE_WIFI_CONF2_BAND_SEL, val);
+ break;
+ default:
+ val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
+ break;
+ }
+
+ switch (val) {
+ case MT_EE_BAND_SEL_2GHZ:
+ phy->mt76->cap.has_2ghz = true;
+ break;
+ case MT_EE_BAND_SEL_5GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ break;
+ case MT_EE_BAND_SEL_6GHZ:
+ phy->mt76->cap.has_6ghz = true;
+ break;
+ case MT_EE_BAND_SEL_5GHZ_6GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ phy->mt76->cap.has_6ghz = true;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
+{
+ u8 path, nss, band_idx = phy->mt76->band_idx;
+ u8 *eeprom = dev->mt76.eeprom.data;
+ struct mt76_phy *mphy = phy->mt76;
+
+ switch (band_idx) {
+ case MT_BAND1:
+ path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 2]);
+ nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 5]);
+ break;
+ case MT_BAND2:
+ path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 2]);
+ nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 5]);
+ break;
+ default:
+ path = FIELD_GET(MT_EE_WIFI_CONF1_TX_PATH_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 1]);
+ nss = FIELD_GET(MT_EE_WIFI_CONF4_STREAM_NUM_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 4]);
+ break;
+ }
+
+ if (!path || path > 4)
+ path = 4;
+
+ nss = min_t(u8, min_t(u8, 4, nss), path);
+
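+ /* per-band chains are stacked into the global chainmask; record the
+ * shift so the next band starts after this band's chains
+ */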
+ mphy->antenna_mask = BIT(nss) - 1;
+ mphy->chainmask = (BIT(path) - 1) << dev->chainshift[band_idx];
+ dev->chainmask |= mphy->chainmask;
+ if (band_idx < MT_BAND2)
+ dev->chainshift[band_idx + 1] = dev->chainshift[band_idx] +
+ hweight16(mphy->chainmask);
+
+ return mt7996_eeprom_parse_band_config(phy);
+}
+
+int mt7996_eeprom_init(struct mt7996_dev *dev)
+{
+ int ret;
+
+ ret = mt7996_eeprom_load(dev);
+ if (ret < 0) {
+ if (ret != -EINVAL)
+ return ret;
+
+ dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n");
+ ret = mt7996_eeprom_load_default(dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7996_eeprom_parse_hw_cap(dev, &dev->phy);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, ETH_ALEN);
+ mt76_eeprom_override(&dev->mphy);
+
+ return 0;
+}
+
+int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
+ struct ieee80211_channel *chan)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ int target_power;
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ target_power = eeprom[MT_EE_TX0_POWER_5G +
+ mt7996_get_channel_group_5g(chan->hw_value)];
+ else if (chan->band == NL80211_BAND_6GHZ)
+ target_power = eeprom[MT_EE_TX0_POWER_6G +
+ mt7996_get_channel_group_6g(chan->hw_value)];
+ else
+ target_power = eeprom[MT_EE_TX0_POWER_2G];
+
+ return target_power;
+}
+
+s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u32 val;
+ s8 delta;
+
+ if (band == NL80211_BAND_5GHZ)
+ val = eeprom[MT_EE_RATE_DELTA_5G];
+ else if (band == NL80211_BAND_6GHZ)
+ val = eeprom[MT_EE_RATE_DELTA_6G];
+ else
+ val = eeprom[MT_EE_RATE_DELTA_2G];
+
+ if (!(val & MT_EE_RATE_DELTA_EN))
+ return 0;
+
+ delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
+
+ return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
new file mode 100644
index 000000000000..8da599e0abea
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#ifndef __MT7996_EEPROM_H
+#define __MT7996_EEPROM_H
+
+#include "mt7996.h"
+
+enum mt7996_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_MAC_ADDR2 = 0x00a,
+ MT_EE_WIFI_CONF = 0x190,
+ MT_EE_MAC_ADDR3 = 0x2c0,
+ MT_EE_RATE_DELTA_2G = 0x1400,
+ MT_EE_RATE_DELTA_5G = 0x147d,
+ MT_EE_RATE_DELTA_6G = 0x154a,
+ MT_EE_TX0_POWER_2G = 0x1300,
+ MT_EE_TX0_POWER_5G = 0x1301,
+ MT_EE_TX0_POWER_6G = 0x1310,
+
+ __MT_EE_MAX = 0x1dff,
+};
+
+#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
+#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(2, 0)
+#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(5, 3)
+#define MT_EE_WIFI_CONF2_BAND_SEL GENMASK(2, 0)
+
+#define MT_EE_WIFI_CONF1_TX_PATH_BAND0 GENMASK(5, 3)
+#define MT_EE_WIFI_CONF2_TX_PATH_BAND1 GENMASK(5, 3)
+#define MT_EE_WIFI_CONF2_TX_PATH_BAND2 GENMASK(2, 0)
+#define MT_EE_WIFI_CONF4_STREAM_NUM_BAND0 GENMASK(5, 3)
+#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND1 GENMASK(5, 3)
+#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND2 GENMASK(2, 0)
+
+#define MT_EE_RATE_DELTA_MASK GENMASK(5, 0)
+#define MT_EE_RATE_DELTA_SIGN BIT(6)
+#define MT_EE_RATE_DELTA_EN BIT(7)
+
+enum mt7996_eeprom_band {
+ MT_EE_BAND_SEL_DEFAULT,
+ MT_EE_BAND_SEL_2GHZ,
+ MT_EE_BAND_SEL_5GHZ,
+ MT_EE_BAND_SEL_6GHZ,
+ MT_EE_BAND_SEL_5GHZ_6GHZ,
+};
+
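+/* map a channel number to its per-group tx power index in the eeprom */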
+static inline int
+mt7996_get_channel_group_5g(int channel)
+{
+ if (channel <= 64)
+ return 0;
+ if (channel <= 96)
+ return 1;
+ if (channel <= 128)
+ return 2;
+ if (channel <= 144)
+ return 3;
+ return 4;
+}
+
+static inline int
+mt7996_get_channel_group_6g(int channel)
+{
+ if (channel <= 29)
+ return 0;
+
+ return DIV_ROUND_UP(channel - 29, 32);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
new file mode 100644
index 000000000000..46b290526092
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -0,0 +1,823 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/thermal.h>
+#include "mt7996.h"
+#include "mac.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ }, {
+ .max = MT7996_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = MT7996_MAX_INTERFACES,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160) |
+ BIT(NL80211_CHAN_WIDTH_80P80),
+ }
+};
+
+static void mt7996_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7996_dev *dev;
+ struct mt76_dev *mt76;
+ u32 val;
+
+ mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
+ dev = container_of(mt76, struct mt7996_dev, mt76);
+
+ /* select TX blink mode, 2: only data frames */
+ mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2);
+
+ /* enable LED */
+ mt76_wr(dev, MT_LED_EN(0), 1);
+
+ /* set LED Tx blink on/off time */
+ val = FIELD_PREP(MT_LED_TX_BLINK_ON_MASK, delay_on) |
+ FIELD_PREP(MT_LED_TX_BLINK_OFF_MASK, delay_off);
+ mt76_wr(dev, MT_LED_TX_BLINK(0), val);
+
+ /* control LED */
+ val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK;
+ if (dev->mt76.led_al)
+ val |= MT_LED_CTRL_POLARITY;
+
+ mt76_wr(dev, MT_LED_CTRL(0), val);
+ mt76_clear(dev, MT_LED_CTRL(0), MT_LED_CTRL_KICK);
+}
+
+static int mt7996_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 delta_on = 0, delta_off = 0;
+
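+ /* convert the requested blink on/off periods into hardware ticks */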
+#define HW_TICK 10
+#define TO_HW_TICK(_t) (((_t) > HW_TICK) ? ((_t) / HW_TICK) : HW_TICK)
+
+ if (*delay_on)
+ delta_on = TO_HW_TICK(*delay_on);
+ if (*delay_off)
+ delta_off = TO_HW_TICK(*delay_off);
+
+ mt7996_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void mt7996_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7996_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7996_led_set_config(led_cdev, 0xff, 0);
+}
+
+static void
+mt7996_init_txpower(struct mt7996_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, nss = hweight8(dev->mphy.antenna_mask);
+ int nss_delta = mt76_tx_power_nss_delta(nss);
+ int pwr_delta = mt7996_eeprom_get_power_delta(dev, sband->band);
+ struct mt76_power_limits limits;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+ int target_power = mt7996_eeprom_get_target_power(dev, chan);
+
+ target_power += pwr_delta;
+ target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
+ &limits,
+ target_power);
+ target_power += nss_delta;
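+ /* eeprom power values and deltas are in 0.5 dB steps */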
+ target_power = DIV_ROUND_UP(target_power, 2);
+ chan->max_power = min_t(int, chan->max_reg_power,
+ target_power);
+ chan->orig_mpwr = target_power;
+ }
+}
+
+static void
+mt7996_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+
+ memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
+ dev->mt76.region = request->dfs_region;
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7996_mcu_rdd_background_enable(phy, NULL);
+
+ mt7996_init_txpower(dev, &phy->mt76->sband_2g.sband);
+ mt7996_init_txpower(dev, &phy->mt76->sband_5g.sband);
+ mt7996_init_txpower(dev, &phy->mt76->sband_6g.sband);
+
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ mt7996_dfs_init_radar_detector(phy);
+}
+
+static void
+mt7996_init_wiphy(struct ieee80211_hw *hw)
+{
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt76_dev *mdev = &phy->dev->mt76;
+ struct wiphy *wiphy = hw->wiphy;
+
+ hw->queues = 4;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->netdev_features = NETIF_F_RXCSUM;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
+
+ phy->slottime = 9;
+
+ hw->sta_data_size = sizeof(struct mt7996_sta);
+ hw->vif_data_size = sizeof(struct mt7996_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ wiphy->reg_notifier = mt7996_regd_notifier;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+
+ if (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background"))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND);
+
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+
+ hw->max_tx_fragments = 4;
+
+ if (phy->mt76->cap.has_2ghz)
+ phy->mt76->sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+
+ if (phy->mt76->cap.has_5ghz) {
+ phy->mt76->sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ }
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7996_set_stream_vht_txbf_caps(phy);
+ mt7996_set_stream_he_caps(phy);
+
+ wiphy->available_antennas_rx = phy->mt76->antenna_mask;
+ wiphy->available_antennas_tx = phy->mt76->antenna_mask;
+}
+
+static void
+mt7996_mac_init_band(struct mt7996_dev *dev, u8 band)
+{
+ u32 mask, set;
+
+ /* clear estimated value of EIFS for Rx duration & OBSS time */
+ mt76_wr(dev, MT_WF_RMAC_RSVD0(band), MT_WF_RMAC_RSVD0_EIFS_CLR);
+
+ /* clear backoff time for Rx duration */
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME1(band),
+ MT_WF_RMAC_MIB_NONQOSD_BACKOFF);
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME3(band),
+ MT_WF_RMAC_MIB_QOS01_BACKOFF);
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
+ MT_WF_RMAC_MIB_QOS23_BACKOFF);
+
+ /* clear backoff time and set software compensation for OBSS time */
+ mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
+ set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
+ FIELD_PREP(MT_WF_RMAC_MIB_ED_OFFSET, 4);
+ mt76_rmw(dev, MT_WF_RMAC_MIB_AIRTIME0(band), mask, set);
+
+ /* filter out non-resp frames and get instantaneous signal reporting */
+ mask = MT_WTBLOFF_RSCR_RCPI_MODE | MT_WTBLOFF_RSCR_RCPI_PARAM;
+ set = FIELD_PREP(MT_WTBLOFF_RSCR_RCPI_MODE, 0) |
+ FIELD_PREP(MT_WTBLOFF_RSCR_RCPI_PARAM, 0x3);
+ mt76_rmw(dev, MT_WTBLOFF_RSCR(band), mask, set);
+}
+
+static void mt7996_mac_init(struct mt7996_dev *dev)
+{
+#define HIF_TXD_V2_1 4
+ int i;
+
+ mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
+
+ for (i = 0; i < MT7996_WTBL_SIZE; i++)
+ mt7996_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ i = dev->mt76.led_pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
+ mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
+ }
+
+ /* txs report queue */
+ mt76_rmw_field(dev, MT_DMA_TCRF1(0), MT_DMA_TCRF1_QIDX, 0);
+ mt76_rmw_field(dev, MT_DMA_TCRF1(1), MT_DMA_TCRF1_QIDX, 6);
+ mt76_rmw_field(dev, MT_DMA_TCRF1(2), MT_DMA_TCRF1_QIDX, 0);
+
+ /* rro module init */
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+
+ mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_HW_PATH_HIF_VER,
+ HIF_TXD_V2_1, 0);
+
+ for (i = MT_BAND0; i <= MT_BAND2; i++)
+ mt7996_mac_init_band(dev, i);
+}
+
+static int mt7996_txbf_init(struct mt7996_dev *dev)
+{
+ int ret;
+
+ if (dev->dbdc_support) {
+ ret = mt7996_mcu_set_txbf(dev, BF_MOD_EN_CTRL);
+ if (ret)
+ return ret;
+ }
+
+ /* trigger sounding packets */
+ ret = mt7996_mcu_set_txbf(dev, BF_SOUNDING_ON);
+ if (ret)
+ return ret;
+
+ /* enable eBF */
+ return mt7996_mcu_set_txbf(dev, BF_HW_EN_UPDATE);
+}
+
+static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
+ enum mt76_band_id band)
+{
+ struct mt76_phy *mphy;
+ u32 mac_ofs, hif1_ofs = 0;
+ int ret;
+
+ if (band != MT_BAND1 && band != MT_BAND2)
+ return 0;
+
+ if ((band == MT_BAND1 && !dev->dbdc_support) ||
+ (band == MT_BAND2 && !dev->tbtc_support))
+ return 0;
+
+ if (phy)
+ return 0;
+
+ if (band == MT_BAND2 && dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7996_ops, band);
+ if (!mphy)
+ return -ENOMEM;
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+ mphy->dev->phys[band] = mphy;
+
+ INIT_DELAYED_WORK(&mphy->mac_work, mt7996_mac_work);
+
+ ret = mt7996_eeprom_parse_hw_cap(dev, phy);
+ if (ret)
+ goto error;
+
+ mac_ofs = band == MT_BAND2 ? MT_EE_MAC_ADDR3 : MT_EE_MAC_ADDR2;
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + mac_ofs, ETH_ALEN);
+ /* Make the extra PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ if (!is_valid_ether_addr(mphy->macaddr)) {
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mphy->macaddr[0] |= 2;
+ mphy->macaddr[0] ^= BIT(7);
+ if (band == MT_BAND2)
+ mphy->macaddr[0] ^= BIT(6);
+ }
+ mt76_eeprom_override(mphy);
+
+ /* init wiphy according to mphy and phy */
+ mt7996_init_wiphy(mphy->hw);
+ ret = mt76_connac_init_tx_queues(phy->mt76,
+ MT_TXQ_ID(band),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(band) + hif1_ofs, 0);
+ if (ret)
+ goto error;
+
+ ret = mt76_register_phy(mphy, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ goto error;
+
+ ret = mt7996_init_debugfs(phy);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ mphy->dev->phys[band] = NULL;
+ ieee80211_free_hw(mphy->hw);
+ return ret;
+}
+
+static void
+mt7996_unregister_phy(struct mt7996_phy *phy, enum mt76_band_id band)
+{
+ struct mt76_phy *mphy;
+
+ if (!phy)
+ return;
+
+ mphy = phy->dev->mt76.phys[band];
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+ phy->dev->mt76.phys[band] = NULL;
+}
+
+static void mt7996_init_work(struct work_struct *work)
+{
+ struct mt7996_dev *dev = container_of(work, struct mt7996_dev,
+ init_work);
+
+ mt7996_mcu_set_eeprom(dev);
+ mt7996_mac_init(dev);
+ mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
+ mt7996_txbf_init(dev);
+}
+
+void mt7996_wfsys_reset(struct mt7996_dev *dev)
+{
+ mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+
+ mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+}
+
+static int mt7996_init_hardware(struct mt7996_dev *dev)
+{
+ int ret, idx;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ INIT_WORK(&dev->init_work, mt7996_init_work);
+
+ dev->dbdc_support = true;
+ dev->tbtc_support = true;
+
+ ret = mt7996_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ ret = mt7996_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7996_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
+{
+ int sts;
+ u32 *cap;
+
+ if (!phy->mt76->cap.has_5ghz)
+ return;
+
+ sts = hweight16(phy->mt76->chainmask);
+ cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
+ *cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ if (sts < 2)
+ return;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE |
+ FIELD_PREP(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, sts - 1);
+}
+
+static void
+mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
+ struct ieee80211_sta_he_cap *he_cap, int vif)
+{
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ int sts = hweight16(phy->mt76->chainmask);
+ u8 c;
+
+#ifdef CONFIG_MAC80211_MESH
+ if (vif == NL80211_IFTYPE_MESH_POINT)
+ return;
+#endif
+
+ elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[5] &= ~c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
+ elem->phy_cap_info[6] &= ~c;
+
+ elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
+
+ c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ elem->phy_cap_info[2] |= c;
+
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ elem->phy_cap_info[4] |= c;
+
+ /* NG16 is not supported since spec D4.0 changed the subcarrier index */
+ c = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU;
+
+ if (vif == NL80211_IFTYPE_STATION)
+ c |= IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+
+ elem->phy_cap_info[6] |= c;
+
+ if (sts < 2)
+ return;
+
+ /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+ elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
+
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ sts - 1) |
+ FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ sts - 1);
+ elem->phy_cap_info[5] |= c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
+ elem->phy_cap_info[6] |= c;
+
+ c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ elem->phy_cap_info[7] |= c;
+}
+
+static void
+mt7996_gen_ppe_thresh(u8 *he_ppet, int nss)
+{
+ u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
+ static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+
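+ /* 7-bit header (NSS + RU index bitmask) followed by a pair of 3-bit
+ * PPET16/PPET8 values per NSS/RU combination
+ */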
+ he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
+ FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
+ ru_bit_mask);
+
+ ppet_bits = IEEE80211_PPE_THRES_INFO_PPET_SIZE *
+ nss * hweight8(ru_bit_mask) * 2;
+ ppet_size = DIV_ROUND_UP(ppet_bits, 8);
+
+ for (i = 0; i < ppet_size - 1; i++)
+ he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3];
+
+ he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3] &
+ (0xff >> (8 - (ppet_bits - 1) % 8));
+}
+
+static int
+mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ struct ieee80211_sband_iftype_data *data)
+{
+ int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
+ u16 mcs_map = 0;
+
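+ /* advertise HE MCS 0-11 for each supported spatial stream */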
+ for (i = 0; i < 8; i++) {
+ if (i < nss)
+ mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
+ else
+ mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
+ }
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *he_mcs =
+ &he_cap->he_mcs_nss_supp;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+#endif
+ break;
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+
+ he_cap_elem->mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE;
+ he_cap_elem->mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
+ he_cap_elem->mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+ he_cap_elem->phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[2] |=
+ IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[4] |=
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[5] |=
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
+ else
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] |=
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ break;
+ }
+
+ he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
+
+ mt7996_set_stream_he_txbf_caps(phy, he_cap, i);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ mt7996_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ } else {
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US;
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+
+ data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
+
+ idx++;
+ }
+
+ return idx;
+}
+
+void mt7996_set_stream_he_caps(struct mt7996_phy *phy)
+{
+ struct ieee80211_sband_iftype_data *data;
+ struct ieee80211_supported_band *band;
+ int n;
+
+ if (phy->mt76->cap.has_2ghz) {
+ data = phy->iftype[NL80211_BAND_2GHZ];
+ n = mt7996_init_he_caps(phy, NL80211_BAND_2GHZ, data);
+
+ band = &phy->mt76->sband_2g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+
+ if (phy->mt76->cap.has_5ghz) {
+ data = phy->iftype[NL80211_BAND_5GHZ];
+ n = mt7996_init_he_caps(phy, NL80211_BAND_5GHZ, data);
+
+ band = &phy->mt76->sband_5g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+
+ if (phy->mt76->cap.has_6ghz) {
+ data = phy->iftype[NL80211_BAND_6GHZ];
+ n = mt7996_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+
+ band = &phy->mt76->sband_6g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+}
+
+int mt7996_register_device(struct mt7996_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int ret;
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ INIT_WORK(&dev->rc_work, mt7996_mac_sta_rc_work);
+ INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7996_mac_work);
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ INIT_LIST_HEAD(&dev->twt_list);
+ spin_lock_init(&dev->sta_poll_lock);
+
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7996_mac_reset_work);
+
+ ret = mt7996_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ mt7996_init_wiphy(hw);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7996_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7996_led_set_blink;
+ }
+
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ return ret;
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
+ ret = mt7996_register_phy(dev, mt7996_phy2(dev), MT_BAND1);
+ if (ret)
+ return ret;
+
+ ret = mt7996_register_phy(dev, mt7996_phy3(dev), MT_BAND2);
+ if (ret)
+ return ret;
+
+ return mt7996_init_debugfs(&dev->phy);
+}
+
+void mt7996_unregister_device(struct mt7996_dev *dev)
+{
+ mt7996_unregister_phy(mt7996_phy3(dev), MT_BAND2);
+ mt7996_unregister_phy(mt7996_phy2(dev), MT_BAND1);
+ mt76_unregister_device(&dev->mt76);
+ mt7996_mcu_exit(dev);
+ mt7996_tx_token_put(dev);
+ mt7996_dma_cleanup(dev);
+ tasklet_disable(&dev->irq_tasklet);
+
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
new file mode 100644
index 000000000000..0b3e28748e76
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -0,0 +1,2498 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7996.h"
+#include "../dma.h"
+#include "mac.h"
+#include "mcu.h"
+
+#define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2)
+
+#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
+ IEEE80211_RADIOTAP_HE_##f)
+
+static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
+ [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
+ [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
+ [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
+ },
+};
+
+static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ },
+};
+
+static const struct mt7996_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
+ [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
+ [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
+ },
+};
+
+static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
+ u16 idx, bool unicast)
+{
+ struct mt7996_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7996_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+void mt7996_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+
+bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
+{
+ mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
+ FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
+
+ return MT_WTBL_LMAC_OFFS(wcid, dw);
+}
+
+static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
+{
+ static const u8 ac_to_tid[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct ieee80211_sta *sta;
+ struct mt7996_sta *msta;
+ struct rate_info *rate;
+ u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
+ LIST_HEAD(sta_poll_list);
+ int i;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ rcu_read_lock();
+
+ while (true) {
+ bool clear = false;
+ u32 addr, val;
+ u16 idx;
+ s8 rssi[4];
+ u8 bw;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+ msta = list_first_entry(&sta_poll_list,
+ struct mt7996_sta, poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ idx = msta->wcid.idx;
+
+ /* refresh peer's airtime reporting */
+ addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
+
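+ /* each AC has a pair of tx/rx airtime counters, 8 bytes apart */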
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+
+ addr += 8;
+ }
+
+ if (clear) {
+ mt7996_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 q = mt76_connac_lmac_mapping(i);
+ u32 tx_cur = tx_time[q];
+ u32 rx_cur = rx_time[q];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
+ }
+
+ /* We don't support reading GI info from txs packets.
+ * For accurate tx status reporting and AQL improvement, we need to
+ * make sure the flags match, so poll the GI from the per-sta
+ * counters directly.
+ */
+ rate = &msta->wcid.rate;
+
+ switch (rate->bw) {
+ case RATE_INFO_BW_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ case RATE_INFO_BW_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case RATE_INFO_BW_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ }
+
+ addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
+ val = mt76_rr(dev, addr);
+ if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ u8 offs = 24 + 2 * bw;
+
+ rate->he_gi = (val & (0x3 << offs)) >> offs;
+ } else if (rate->flags &
+ (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
+ if (val & BIT(12 + bw))
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ else
+ rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
+ }
+
+ /* get signal strength of resp frames (CTS/BA/ACK) */
+ addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
+ val = mt76_rr(dev, addr);
+
+ rssi[0] = to_rssi(GENMASK(7, 0), val);
+ rssi[1] = to_rssi(GENMASK(15, 8), val);
+ rssi[2] = to_rssi(GENMASK(23, 16), val);
+ rssi[3] = to_rssi(GENMASK(31, 24), val);
+
+ msta->ack_signal =
+ mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
+
+ ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ }
+
+ rcu_read_unlock();
+}
+
+void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ u32 addr;
+
+ addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
+ if (enable)
+ mt76_set(dev, addr, BIT(5));
+ else
+ mt76_clear(dev, addr, BIT(5));
+}
+
+static void
+mt7996_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
+ struct ieee80211_radiotap_he *he,
+ __le32 *rxv)
+{
+ u32 ru_h, ru_l;
+ u8 ru, offs = 0;
+
+ ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+ ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
+ ru = (u8)(ru_l | ru_h << 4);
+
+ status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+
+ he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+}
+
+static void
+mt7996_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
+ HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
+ .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+
+ he_mu = skb_push(skb, sizeof(mu_known));
+ memcpy(he_mu, &mu_known, sizeof(mu_known));
+
+#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
+
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
+ if (status->he_dcm)
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
+
+ he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
+ MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
+ le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
+
+ he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
+
+ if (status->bw >= RATE_INFO_BW_40) {
+ he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
+ he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
+ }
+
+ if (status->bw >= RATE_INFO_BW_80) {
+ he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
+ he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
+ }
+}
+
+static void
+mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
+ HE_BITS(DATA1_DATA_DCM_KNOWN) |
+ HE_BITS(DATA1_STBC_KNOWN) |
+ HE_BITS(DATA1_CODING_KNOWN) |
+ HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
+ HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
+ HE_BITS(DATA1_BSS_COLOR_KNOWN),
+ .data2 = HE_BITS(DATA2_GI_KNOWN) |
+ HE_BITS(DATA2_TXBF_KNOWN) |
+ HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
+ HE_BITS(DATA2_TXOP_KNOWN),
+ };
+ struct ieee80211_radiotap_he *he = NULL;
+ u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
+
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he = skb_push(skb, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+
+ he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
+ HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+ he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+ he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
+ le16_encode_bits(ltf_size,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ he->data5 |= HE_BITS(DATA5_TXBF);
+ he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
+ HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
+
+ switch (mode) {
+ case MT_PHY_TYPE_HE_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
+ HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_EXT_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
+ HE_BITS(DATA1_UL_DL_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
+
+ mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
+ mt7996_mac_decode_he_mu_radiotap(skb, rxv);
+ break;
+ case MT_PHY_TYPE_HE_TB:
+ he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
+ HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
+
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
+
+ mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
+ break;
+ default:
+ break;
+ }
+}
+
+/* The HW does not translate the mac header to 802.3 for mesh point frames */
+static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
+ struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_hdr hdr;
+ u16 frame_control;
+
+ if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
+ MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
+ if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
+ return -EINVAL;
+
+ if (!msta || !msta->vif)
+ return -EINVAL;
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ /* store the info from RXD and ethhdr so it isn't overwritten */
+ frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
+ hdr.duration_id = 0;
+
+ ether_addr_copy(hdr.addr1, vif->addr);
+ ether_addr_copy(hdr.addr2, sta->addr);
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
+ case 0:
+ ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
+ break;
+ case IEEE80211_FCTL_TODS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ break;
+ case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
+ break;
+ default:
+ break;
+ }
+
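+ /* strip the ethernet header and rebuild the LLC/SNAP encapsulation
+ * where needed before pushing the reconstructed 802.11 header
+ */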
+ skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
+ ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
+ else
+ skb_pull(skb, 2);
+
+ if (ieee80211_has_order(hdr.frame_control))
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
+ IEEE80211_HT_CTL_LEN);
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
+ if (ieee80211_has_a4(hdr.frame_control))
+ memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+ else
+ memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
+
+ return 0;
+}
+
+static int
+mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv, u8 *mode)
+{
+ u32 v0, v2;
+ u8 stbc, gi, bw, dcm, nss;
+ int i, idx;
+ bool cck = false;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ i = idx;
+ nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+
+ stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
+ gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
+ *mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
+ dcm = FIELD_GET(MT_PRXV_DCM, v2);
+ bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);
+
+ switch (*mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = nss;
+ status->encoding = RX_ENC_VHT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 11)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = dcm;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (*mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ return 0;
+}
+
+static int
+mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7996_phy *phy = &dev->phy;
+ struct ieee80211_supported_band *sband;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *rxv = NULL;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ u32 rxd3 = le32_to_cpu(rxd[3]);
+ u32 rxd4 = le32_to_cpu(rxd[4]);
+ u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_status = *(u32 *)skb->cb;
+ bool unicast, insert_ccmp_hdr = false;
+ u8 remove_pad, amsdu_info, band_idx;
+ u8 mode = 0, qos_ctl = 0;
+ bool hdr_trans;
+ u16 hdr_gap;
+ u16 seq_ctrl = 0;
+ __le16 fc = 0;
+ int idx;
+
+ memset(status, 0, sizeof(*status));
+
+ band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
+ mphy = dev->mt76.phys[band_idx];
+ phy = mphy->priv;
+ status->phy_idx = mphy->band_idx;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
+ return -EINVAL;
+
+ hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
+ if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
+ return -EINVAL;
+
+ /* ICV error or CCMP/BIP/WPI MIC error */
+ if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
+ unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+ status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
+
+ if (status->wcid) {
+ struct mt7996_sta *msta;
+
+ msta = container_of(status->wcid, struct mt7996_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else if (status->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if ((rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+ !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
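+ /* the fixed part of the RXD is 8 dwords; optional groups follow */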
+ rxd += 8;
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+ u32 v0 = le32_to_cpu(rxd[0]);
+ u32 v2 = le32_to_cpu(rxd[2]);
+
+ fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
+ qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
+ seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
+
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
+ case MT_CIPHER_AES_CCMP:
+ case MT_CIPHER_CCMP_CCX:
+ case MT_CIPHER_CCMP_256:
+ insert_ccmp_hdr =
+ FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ fallthrough;
+ case MT_CIPHER_TKIP:
+ case MT_CIPHER_TKIP_NO_MIC:
+ case MT_CIPHER_GCMP:
+ case MT_CIPHER_GCMP_256:
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+ break;
+ default:
+ break;
+ }
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
+ status->timestamp = le32_to_cpu(rxd[0]);
+ status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != status->timestamp) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = status->timestamp;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ /* RXD Group 3 - P-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
+ u32 v3;
+ int ret;
+
+ rxv = rxd;
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ v3 = le32_to_cpu(rxv[3]);
+
+ status->chains = mphy->antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
+ status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
+ status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
+ status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);
+
+ /* RXD Group 5 - C-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+ rxd += 24;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
+ if (ret < 0)
+ return ret;
+ }
+
+ amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+ status->amsdu = !!amsdu_info;
+ if (status->amsdu) {
+ status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
+ status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
+ }
+
+ hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
+ if (hdr_trans && ieee80211_has_morefrags(fc)) {
+ if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
+ return -EINVAL;
+ hdr_trans = false;
+ } else {
+ int pad_start = 0;
+
+ skb_pull(skb, hdr_gap);
+ if (!hdr_trans && status->amsdu) {
+ pad_start = ieee80211_get_hdrlen_from_skb(skb);
+ } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
+ /* When header translation failure is indicated,
+ * the hardware will insert an extra 2-byte field
+ * containing the data length after the protocol
+ * type field.
+ */
+ pad_start = 12;
+ if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
+ pad_start += 4;
+ else
+ pad_start = 0;
+ }
+
+ if (pad_start) {
+ memmove(skb->data + 2, skb->data, pad_start);
+ skb_pull(skb, 2);
+ }
+ }
+
+ if (!hdr_trans) {
+ struct ieee80211_hdr *hdr;
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
+ hdr = mt76_skb_get_hdr(skb);
+ fc = hdr->frame_control;
+ if (ieee80211_is_data_qos(fc)) {
+ seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+ qos_ctl = *ieee80211_get_qos_ctl(hdr);
+ }
+ } else {
+ status->flag |= RX_FLAG_8023;
+ }
+
+ if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
+ mt7996_mac_decode_he_radiotap(skb, rxv, mode);
+
+ if (!status->wcid || !ieee80211_is_data_qos(fc))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(fc);
+ status->qos_ctl = qos_ctl;
+ status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
+
+ return 0;
+}
+
+static void
+mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid)
+{
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ u8 fc_type, fc_stype;
+ u16 ethertype;
+ bool wmm = false;
+ u32 val;
+
+ if (wcid->sta) {
+ struct ieee80211_sta *sta;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ wmm = sta->wme;
+ }
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+
+ ethertype = get_unaligned_be16(&skb->data[12]);
+ if (ethertype >= ETH_P_802_3_MIN)
+ val |= MT_TXD1_ETH_802_3;
+
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = IEEE80211_FTYPE_DATA >> 2;
+ fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+
+ txwi[2] |= cpu_to_le32(val);
+}
+
+static void
+mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct ieee80211_key_conf *key)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ __le16 fc = hdr->frame_control;
+ u8 fc_type, fc_stype;
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ tid = MT_TX_ADDBA;
+ else if (ieee80211_is_mgmt(hdr->frame_control))
+ tid = MT_TX_NORMAL;
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+
+ if (!ieee80211_is_data(fc) || multicast ||
+ info->flags & IEEE80211_TX_CTL_USE_MINRATE)
+ val |= MT_TXD1_FIXED_RATE;
+
+ if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ val |= MT_TXD1_BIP;
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+ }
+
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+
+ txwi[2] |= cpu_to_le32(val);
+
+ txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
+ if (ieee80211_is_beacon(fc)) {
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
+ txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val = MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ txwi[3] |= cpu_to_le32(val);
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
+ }
+}
+
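+/* Build the fixed-rate TXD field (MT_TX_RATE_MODE / MT_TX_RATE_IDX) used for
+ * beacon and multicast frames.
+ */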
+static u16
+mt7996_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
+{
+ u8 mode = 0, band = mphy->chandef.chan->band;
+ int rateidx = 0, mcast_rate;
+
+ if (beacon) {
+ struct cfg80211_bitrate_mask *mask;
+
+ mask = &vif->bss_conf.beacon_tx_rate;
+ if (hweight16(mask->control[band].he_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HE_SU;
+ goto out;
+ } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_VHT;
+ goto out;
+ } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HT;
+ goto out;
+ } else if (hweight32(mask->control[band].legacy) == 1) {
+ rateidx = ffs(mask->control[band].legacy) - 1;
+ goto legacy;
+ }
+ }
+
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast && mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+legacy:
+ rateidx = mt76_calculate_default_rate(mphy, rateidx);
+ mode = rateidx >> 8;
+ rateidx &= GENMASK(7, 0);
+
+out:
+ return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
+ FIELD_PREP(MT_TX_RATE_MODE, mode);
+}
+
+void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
+ struct ieee80211_key_conf *key, u32 changed)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->mphy;
+ u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ u16 tx_count = 15;
+ u32 val;
+ bool beacon = !!(changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED));
+ bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY));
+
+ if (vif) {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+
+ omac_idx = mvif->mt76.omac_idx;
+ wmm_idx = mvif->mt76.wmm_idx;
+ band_idx = mvif->mt76.band_idx;
+ }
+
+ mphy = mt76_dev_phy(&dev->mt76, band_idx);
+
+ if (inband_disc) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_ALTX0;
+ } else if (beacon) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_BCN0;
+ } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ p_fmt = MT_TX_TYPE_CT;
+ q_idx = MT_LMAC_ALTX0;
+ } else {
+ p_fmt = MT_TX_TYPE_CT;
+ q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+
+ if (band_idx)
+ val |= FIELD_PREP(MT_TXD1_TGID, band_idx);
+
+ txwi[1] = cpu_to_le32(val);
+ txwi[2] = 0;
+
+ val = MT_TXD3_SW_POWER_MGMT |
+ FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+ if (key)
+ val |= MT_TXD3_PROTECT_FRAME;
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ val |= MT_TXD3_NO_ACK;
+ if (wcid->amsdu)
+ val |= MT_TXD3_HW_AMSDU;
+
+ txwi[3] = cpu_to_le32(val);
+ txwi[4] = 0;
+
+ val = FIELD_PREP(MT_TXD5_PID, pid);
+ if (pid >= MT_PACKET_ID_FIRST)
+ val |= MT_TXD5_TX_STATUS_HOST;
+ txwi[5] = cpu_to_le32(val);
+
+ val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
+ FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ txwi[6] = cpu_to_le32(val);
+ txwi[7] = 0;
+
+ if (is_8023)
+ mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
+ else
+ mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
+
+ if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
+ /* Fixed rate is only available for frames using the 802.11 txd */
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ u16 rate = mt7996_mac_tx_rate_val(mphy, vif, beacon, multicast);
+
+ /* force 20 MHz bandwidth */
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_BW, 0) |
+ FIELD_PREP(MT_TXD6_TX_RATE, rate);
+
+ txwi[6] |= cpu_to_le32(val);
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+ }
+}
+
+int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_txwi_cache *t;
+ struct mt7996_txp *txp;
+ int id, i, pid, nbuf = tx_info->nbuf - 1;
+ bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ u8 *txwi = (u8 *)txwi_ptr;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ if (sta) {
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+
+ if (time_after(jiffies, msta->jiffies + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->jiffies = jiffies;
+ }
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ id = mt76_token_consume(mdev, &t);
+ if (id < 0)
+ return id;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ memset(txwi_ptr, 0, MT_TXD_SIZE);
+ /* Frames sent with an 802.11 header (non-QoS data) need the txd filled in by the host */
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid,
+ key, 0);
+
+ txp = (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->mt76.idx;
+ }
+
+ txp->token = cpu_to_le16(id);
+ if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
+ txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
+ else
+ txp->rept_wds_wcid = cpu_to_le16(0xfff);
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ return 0;
+}
+
+static void
+mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+{
+ struct mt7996_sta *msta;
+ u16 fc, tid;
+ u32 val;
+
+ if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ return;
+
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
+ if (tid >= 6) /* skip VO queue */
+ return;
+
+ val = le32_to_cpu(txwi[2]);
+ fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
+ FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
+ return;
+
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ if (!test_and_set_bit(tid, &msta->ampdu_state))
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+}
+
+static void
+mt7996_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ struct mt7996_txp *txp;
+ int i;
+
+ txp = mt7996_txwi_to_txp(dev, t);
+ for (i = 0; i < txp->nbuf; i++)
+ dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+static void
+mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, struct list_head *free_list)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_wcid *wcid;
+ __le32 *txwi;
+ u16 wcid_idx;
+
+ mt7996_txp_skb_unmap(mdev, t);
+ if (!t->skb)
+ goto out;
+
+ txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+ if (sta) {
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wcid_idx = wcid->idx;
+
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7996_tx_check_aggr(sta, txwi);
+ } else {
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+
+out:
+ t->skb = NULL;
+ mt76_put_txwi(mdev, t);
+}
+
+static void
+mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
+{
+ __le32 *tx_free = (__le32 *)data, *cur_info;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
+ struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ LIST_HEAD(free_list);
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
+ bool wake = false;
+ u16 total, count = 0;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+ if (phy2) {
+ mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
+ }
+ if (phy3) {
+ mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
+ }
+
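+ /* only the v4 (and later) tx-free event format is parsed here */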
+ if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
+ return;
+
+ total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
+ for (cur_info = &tx_free[2]; count < total; cur_info++) {
+ u32 msdu, info;
+ u8 i;
+
+ if (WARN_ON_ONCE((void *)cur_info >= end))
+ return;
+ /* 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ info = le32_to_cpu(*cur_info);
+ if (info & MT_TXFREE_INFO_PAIR) {
+ struct mt7996_sta *msta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7996_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ continue;
+ }
+
+ if (info & MT_TXFREE_INFO_HEADER)
+ continue;
+
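+ /* each info word carries up to two 15-bit MSDU token IDs */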
+ for (i = 0; i < 2; i++) {
+ msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
+ if (msdu == MT_TXFREE_INFO_MSDU_ID)
+ continue;
+
+ count++;
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7996_txwi_free(dev, txwi, sta, &free_list);
+ }
+ }
+
+ mt7996_mac_sta_poll(dev);
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+
+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+}
+
+static bool
+mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
+ __le32 *txs_data, struct mt76_sta_stats *stats)
+{
+ struct ieee80211_supported_band *sband;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy;
+ struct ieee80211_tx_info *info;
+ struct sk_buff_head list;
+ struct rate_info rate = {};
+ struct sk_buff *skb;
+ bool cck = false;
+ u32 txrate, txs, mode, stbc;
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (!skb)
+ goto out_no_skb;
+
+ txs = le32_to_cpu(txs_data[0]);
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ info->status.rates[0].idx = -1;
+
+ txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+
+ rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
+ rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
+ stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);
+
+ if (stbc && rate.nss > 1)
+ rate.nss >>= 1;
+
+ if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
+ stats->tx_nss[rate.nss - 1]++;
+ if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
+ stats->tx_mcs[rate.mcs]++;
+
+ mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ mphy = mt76_dev_phy(mdev, wcid->phy_idx);
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
+ rate.legacy = sband->bitrates[rate.mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ if (rate.mcs > 31)
+ goto out;
+
+ rate.flags = RATE_INFO_FLAGS_MCS;
+ if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
+ rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ if (rate.mcs > 9)
+ goto out;
+
+ rate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ if (rate.mcs > 11)
+ goto out;
+
+ rate.he_gi = wcid->rate.he_gi;
+ rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
+ rate.flags = RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ goto out;
+ }
+
+ stats->tx_mode[mode]++;
+
+ switch (FIELD_GET(MT_TXS0_BW, txs)) {
+ case IEEE80211_STA_RX_BW_160:
+ rate.bw = RATE_INFO_BW_160;
+ stats->tx_bw[3]++;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate.bw = RATE_INFO_BW_80;
+ stats->tx_bw[2]++;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate.bw = RATE_INFO_BW_40;
+ stats->tx_bw[1]++;
+ break;
+ default:
+ rate.bw = RATE_INFO_BW_20;
+ stats->tx_bw[0]++;
+ break;
+ }
+ wcid->rate = rate;
+
+out:
+ mt76_tx_status_skb_done(mdev, skb, &list);
+
+out_no_skb:
+ mt76_tx_status_unlock(mdev, &list);
+
+ return !!skb;
+}
+
+static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
+{
+ struct mt7996_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u16 wcidx;
+ u8 pid;
+
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
+ return;
+
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
+
+ if (pid < MT_PACKET_ID_FIRST)
+ return;
+
+ if (wcidx >= MT7996_WTBL_SIZE)
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ msta = container_of(wcid, struct mt7996_sta, wcid);
+
+ mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);
+
+ if (!wcid->sta)
+ goto out;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+out:
+ rcu_read_unlock();
+}
+
+bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ if (type != PKT_TYPE_NORMAL) {
+ u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
+
+ if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
+ MT_RXD0_SW_PKT_TYPE_FRAME))
+ return true;
+ }
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7996_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ mt7996_mac_add_txs(dev, rxd);
+ return false;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7996_debugfs_rx_fw_monitor(dev, data, len);
+ return false;
+ default:
+ return true;
+ }
+}
+
+void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb, u32 *info)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ if (type != PKT_TYPE_NORMAL) {
+ u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
+
+ if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
+ MT_RXD0_SW_PKT_TYPE_FRAME))
+ type = PKT_TYPE_NORMAL;
+ }
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7996_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7996_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_TXS:
+ for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ mt7996_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_NORMAL:
+ if (!mt7996_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7996_txp *txp;
+
+ txp = mt7996_txwi_to_txp(mdev, e->txwi);
+ t = mt76_token_put(mdev, le16_to_cpu(txp->token));
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+
+void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);
+
+ mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
+ mt76_set(dev, reg, BIT(11) | BIT(9));
+}
+
+void mt7996_mac_reset_counters(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ u8 band_idx = phy->mt76->band_idx;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
+
+ phy->mt76->survey_time = ktime_get_boottime();
+
+ memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
+
+ /* reset airtime counters */
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+
+ mt7996_mcu_get_chan_mib_info(phy, true);
+}
+
+void mt7996_mac_set_timing(struct mt7996_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7996_dev *dev = phy->dev;
+ struct mt7996_phy *phy2 = mt7996_phy2(dev);
+ struct mt7996_phy *phy3 = mt7996_phy3(dev);
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+ u8 band_idx = phy->mt76->band_idx;
+ int offset;
+ bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ if (phy2)
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ phy2->coverage_class);
+
+ if (phy3)
+ coverage_class = max_t(s16, coverage_class,
+ phy3->coverage_class);
+
+ mt76_set(dev, MT_ARB_SCR(band_idx),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ udelay(1);
+
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+
+ mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(band_idx),
+ FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, 10) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (!a_band)
+ mt76_wr(dev, MT_TMAC_ICR1(band_idx),
+ FIELD_PREP(MT_IFS_EIFS_CCK, 314));
+
+ if (phy->slottime < 20 || a_band)
+ val = MT7996_CFEND_RATE_DEFAULT;
+ else
+ val = MT7996_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR0(band_idx), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(band_idx),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
+
+void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
+{
+ mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
+ MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
+ MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);
+
+ mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
+ FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
+}
+
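+/* Estimate the noise floor as a weighted average of the per-antenna IRPI
+ * histogram bins.
+ */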
+static u8
+mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
+{
+ static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
+ struct mt7996_dev *dev = phy->dev;
+ u32 val, sum = 0, n = 0;
+ int ant, i;
+
+ for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
+ u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);
+
+ for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
+ val = mt76_rr(dev, reg);
+ sum += val * nf_power[i];
+ n += val;
+ }
+ }
+
+ return n ? sum / n : 0;
+}
+
+void mt7996_update_channel(struct mt76_phy *mphy)
+{
+ struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
+ struct mt76_channel_state *state = mphy->chan_state;
+ int nf;
+
+ mt7996_mcu_get_chan_mib_info(phy, false);
+
+ nf = mt7996_phy_get_nf(phy, mphy->band_idx);
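+ /* keep a running average of the noise floor with 4 fractional bits */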
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
+
+ state->noise = -(phy->noise >> 4);
+}
+
+static bool
+mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
+{
+ bool ret;
+
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7996_RESET_TIMEOUT);
+
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
+
+static void
+mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7996_update_beacons(struct mt7996_dev *dev)
+{
+ struct mt76_phy *phy2, *phy3;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_update_vif_beacon, dev->mt76.hw);
+
+ phy2 = dev->mt76.phys[MT_BAND1];
+ if (!phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_update_vif_beacon, phy2->hw);
+
+ phy3 = dev->mt76.phys[MT_BAND2];
+ if (!phy3)
+ return;
+
+ ieee80211_iterate_active_interfaces(phy3->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_update_vif_beacon, phy3->hw);
+}
+
+static void
+mt7996_dma_reset(struct mt7996_dev *dev)
+{
+ struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
+ struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
+ u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+ int i;
+
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (dev->hif2)
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ if (phy2)
+ mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
+ if (phy3)
+ mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
+ }
+
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_reset(dev, i);
+
+ mt76_tx_status_check(&dev->mt76, true);
+
+ /* re-init prefetch settings after reset */
+ mt7996_dma_prefetch(dev);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (dev->hif2)
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+}
+
+void mt7996_tx_token_put(struct mt7996_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ idr_for_each_entry(&dev->mt76.token, txwi, id) {
+ mt7996_txwi_free(dev, txwi, NULL, NULL);
+ dev->mt76.token_count--;
+ }
+ spin_unlock_bh(&dev->mt76.token_lock);
+ idr_destroy(&dev->mt76.token);
+}
+
+/* system error recovery */
+void mt7996_mac_reset_work(struct work_struct *work)
+{
+ struct mt7996_phy *phy2, *phy3;
+ struct mt7996_dev *dev;
+ int i;
+
+ dev = container_of(work, struct mt7996_dev, reset_work);
+ phy2 = mt7996_phy2(dev);
+ phy3 = mt7996_phy3(dev);
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (phy2)
+ ieee80211_stop_queues(phy2->mt76->hw);
+ if (phy3)
+ ieee80211_stop_queues(phy3->mt76->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
+ if (phy2) {
+ set_bit(MT76_RESET, &phy2->mt76->state);
+ cancel_delayed_work_sync(&phy2->mt76->mac_work);
+ }
+ if (phy3) {
+ set_bit(MT76_RESET, &phy3->mt76->state);
+ cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ }
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+
+ if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7996_dma_reset(dev);
+
+ mt7996_tx_token_put(dev);
+ idr_init(&dev->mt76.token);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+ mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ if (phy2)
+ clear_bit(MT76_RESET, &phy2->mt76->state);
+ if (phy3)
+ clear_bit(MT76_RESET, &phy3->mt76->state);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+ local_bh_enable();
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ local_bh_disable();
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+ local_bh_enable();
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (phy2)
+ ieee80211_wake_queues(phy2->mt76->hw);
+ if (phy3)
+ ieee80211_wake_queues(phy3->mt76->hw);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt7996_update_beacons(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+ MT7996_WATCHDOG_TIME);
+ if (phy2)
+ ieee80211_queue_delayed_work(phy2->mt76->hw,
+ &phy2->mt76->mac_work,
+ MT7996_WATCHDOG_TIME);
+ if (phy3)
+ ieee80211_queue_delayed_work(phy3->mt76->hw,
+ &phy3->mt76->mac_work,
+ MT7996_WATCHDOG_TIME);
+}
+
+void mt7996_mac_update_stats(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ u8 band_idx = phy->mt76->band_idx;
+ u32 cnt;
+ int i;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
+ mib->fcs_err_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
+ mib->rx_fifo_full_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
+ mib->rx_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
+ mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
+ mib->rx_vector_mismatch_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
+ mib->rx_delimiter_fail_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
+ mib->rx_len_mismatch_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
+ mib->tx_ampdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
+ mib->tx_stop_q_empty_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
+ mib->tx_mpdu_attempts_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
+ mib->tx_mpdu_success_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
+ mib->rx_ampdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
+ mib->rx_ampdu_bytes_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
+ mib->rx_ampdu_valid_subframe_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
+ mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
+ mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
+ mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
+ mib->rx_pfdrop_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
+ mib->rx_vec_queue_overflow_drop_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
+ mib->rx_ba_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
+ mib->tx_bf_ebf_ppdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
+ mib->tx_bf_ibf_ppdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
+ mib->tx_mu_bf_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
+ mib->tx_mu_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
+ mib->tx_mu_acked_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
+ mib->tx_su_acked_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
+ mib->tx_bf_rx_fb_ht_cnt += cnt;
+ mib->tx_bf_rx_fb_all_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
+ mib->tx_bf_rx_fb_vht_cnt += cnt;
+ mib->tx_bf_rx_fb_all_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
+ mib->tx_bf_rx_fb_he_cnt += cnt;
+ mib->tx_bf_rx_fb_all_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
+ mib->tx_bf_rx_fb_eht_cnt += cnt;
+ mib->tx_bf_rx_fb_all_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
+ mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
+ mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
+ mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
+ mib->tx_bf_fb_trig_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
+ mib->tx_bf_fb_cpl_cnt += cnt;
+
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
+ mib->tx_amsdu[i] += cnt;
+ mib->tx_amsdu_cnt += cnt;
+ }
+
+ /* rts count */
+ cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
+ mib->rts_cnt += cnt;
+
+ /* rts retry count */
+ cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
+ mib->rts_retries_cnt += cnt;
+
+ /* ba miss count */
+ cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
+ mib->ba_miss_cnt += cnt;
+
+ /* ack fail count */
+ cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
+ mib->ack_fail_cnt += cnt;
+
+ for (i = 0; i < 16; i++) {
+ cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
+ phy->mt76->aggr_stats[i] += cnt;
+ }
+}
+
+void mt7996_mac_sta_rc_work(struct work_struct *work)
+{
+ struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct mt7996_sta *msta;
+ u32 changed;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_rc_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7996_sta, rc_list);
+ list_del_init(&msta->rc_list);
+ changed = msta->changed;
+ msta->changed = 0;
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED |
+ IEEE80211_RC_BW_CHANGED))
+ mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);
+
+ /* TODO: smps change */
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
+
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+void mt7996_mac_work(struct work_struct *work)
+{
+ struct mt7996_phy *phy;
+ struct mt76_phy *mphy;
+
+ mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
+ mac_work.work);
+ phy = mphy->priv;
+
+ mutex_lock(&mphy->dev->mutex);
+
+ mt76_update_survey(mphy);
+ if (++mphy->mac_work_count == 5) {
+ mphy->mac_work_count = 0;
+
+ mt7996_mac_update_stats(phy);
+ }
+
+ mutex_unlock(&mphy->dev->mutex);
+
+ mt76_tx_status_check(mphy->dev, false);
+
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7996_WATCHDOG_TIME);
+}
+
+static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+
+ if (phy->rdd_state & BIT(0))
+ mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
+}
+
+static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
+{
+ int err, region;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
+ MT_RX_SEL0, region);
+ if (err < 0)
+ return err;
+
+ return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
+}
+
+static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7996_dev *dev = phy->dev;
+ u8 band_idx = phy->mt76->band_idx;
+ int err;
+
+ /* start CAC */
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
+ MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ err = mt7996_dfs_start_rdd(dev, band_idx);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(band_idx);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160 ||
+ chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ err = mt7996_dfs_start_rdd(dev, 1);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(1);
+ }
+
+ return 0;
+}
+
+static int
+mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
+{
+ const struct mt7996_dfs_radar_spec *radar_specs;
+ struct mt7996_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7996_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7996_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ enum mt76_dfs_state dfs_state, prev_state;
+ int err;
+
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+
+ if (prev_state == dfs_state)
+ return 0;
+
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7996_dfs_stop_radar_detector(phy);
+
+ if (dfs_state == MT_DFS_STATE_DISABLED)
+ goto stop;
+
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7996_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
+
+ err = mt7996_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
+
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
+
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
+
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
+ }
+
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
+stop:
+ err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ mt7996_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
+ return 0;
+}
+
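+/* TWT wake duration is expressed in units of 256us; convert it to TSF microseconds */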
+static int
+mt7996_mac_twt_duration_align(int duration)
+{
+ return duration << 8;
+}
+
+static u64
+mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
+ struct mt7996_twt_flow *flow)
+{
+ struct mt7996_twt_flow *iter, *iter_next;
+ u32 duration = flow->duration << 8;
+ u64 start_tsf;
+
+ iter = list_first_entry_or_null(&dev->twt_list,
+ struct mt7996_twt_flow, list);
+ if (!iter || !iter->sched || iter->start_tsf > duration) {
+ /* add flow as first entry in the list */
+ list_add(&flow->list, &dev->twt_list);
+ return 0;
+ }
+
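+ /* walk the TSF-ordered schedule list and insert the flow into the
+ * first gap large enough for its duration
+ */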
+ list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
+ start_tsf = iter->start_tsf +
+ mt7996_mac_twt_duration_align(iter->duration);
+ if (list_is_last(&iter->list, &dev->twt_list))
+ break;
+
+ if (!iter_next->sched ||
+ iter_next->start_tsf > start_tsf + duration) {
+ list_add(&flow->list, &iter->list);
+ goto out;
+ }
+ }
+
+ /* add flow as last entry in the list */
+ list_add_tail(&flow->list, &dev->twt_list);
+out:
+ return start_tsf;
+}
+
+static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
+{
+ struct ieee80211_twt_params *twt_agrt;
+ u64 interval, duration;
+ u16 mantissa;
+ u8 exp;
+
+ /* only individual agreement supported */
+ if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
+ return -EOPNOTSUPP;
+
+ /* only 256us unit supported */
+ if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
+ return -EOPNOTSUPP;
+
+ twt_agrt = (struct ieee80211_twt_params *)twt->params;
+
+ /* explicit agreement not supported */
+ if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
+ return -EOPNOTSUPP;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
+ le16_to_cpu(twt_agrt->req_type));
+ mantissa = le16_to_cpu(twt_agrt->mantissa);
+ duration = twt_agrt->min_twt_dur << 8;
+
+ interval = (u64)mantissa << exp;
+ if (interval < duration)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt)
+{
+ enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
+ u16 req_type = le16_to_cpu(twt_agrt->req_type);
+ enum ieee80211_twt_setup_cmd sta_setup_cmd;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_twt_flow *flow;
+ int flowid, table_id;
+ u8 exp;
+
+ if (mt7996_mac_check_twt_req(twt))
+ goto out;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
+ goto unlock;
+
+ if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+ goto unlock;
+
+ flowid = ffs(~msta->twt.flowid_mask) - 1;
+ le16p_replace_bits(&twt_agrt->req_type, flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
+
+ table_id = ffs(~dev->twt.table_mask) - 1;
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
+ sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
+
+ flow = &msta->twt.flow[flowid];
+ memset(flow, 0, sizeof(*flow));
+ INIT_LIST_HEAD(&flow->list);
+ flow->wcid = msta->wcid.idx;
+ flow->table_id = table_id;
+ flow->id = flowid;
+ flow->duration = twt_agrt->min_twt_dur;
+ flow->mantissa = twt_agrt->mantissa;
+ flow->exp = exp;
+ flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
+ flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
+ flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
+
+ if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
+ sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
+ u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
+ u64 flow_tsf, curr_tsf;
+ u32 rem;
+
+ flow->sched = true;
+ flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
+ curr_tsf = __mt7996_get_tsf(hw, msta->vif);
+ div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
+ flow_tsf = curr_tsf + interval - rem;
+ twt_agrt->twt = cpu_to_le64(flow_tsf);
+ } else {
+ list_add_tail(&flow->list, &dev->twt_list);
+ }
+ flow->tsf = le64_to_cpu(twt_agrt->twt);
+
+ if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
+ goto unlock;
+
+ setup_cmd = TWT_SETUP_CMD_ACCEPT;
+ dev->twt.table_mask |= BIT(table_id);
+ msta->twt.flowid_mask |= BIT(flowid);
+ dev->twt.n_agrt++;
+
+unlock:
+ mutex_unlock(&dev->mt76.mutex);
+out:
+ le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
+ IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
+ (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+}
+
+void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
+ struct mt7996_sta *msta,
+ u8 flowid)
+{
+ struct mt7996_twt_flow *flow;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ if (flowid >= ARRAY_SIZE(msta->twt.flow))
+ return;
+
+ if (!(msta->twt.flowid_mask & BIT(flowid)))
+ return;
+
+ flow = &msta->twt.flow[flowid];
+ if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
+ MCU_TWT_AGRT_DELETE))
+ return;
+
+ list_del_init(&flow->list);
+ msta->twt.flowid_mask &= ~BIT(flowid);
+ dev->twt.table_mask &= ~BIT(flow->table_id);
+ dev->twt.n_agrt--;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
new file mode 100644
index 000000000000..9f68852012b9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#ifndef __MT7996_MAC_H
+#define __MT7996_MAC_H
+
+#define MT_CT_PARSE_LEN 72
+#define MT_CT_DMA_BUF_NUM 2
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 27)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+
+#define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16)
+#define MT_RXD0_SW_PKT_TYPE_MAP 0x380F
+#define MT_RXD0_SW_PKT_TYPE_FRAME 0x3801
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS,
+ PKT_TYPE_TXRXV,
+ PKT_TYPE_NORMAL,
+ PKT_TYPE_RX_DUP_RFB,
+ PKT_TYPE_RX_TMR,
+ PKT_TYPE_RETRIEVE,
+ PKT_TYPE_TXRX_NOTIFY,
+ PKT_TYPE_RX_EVENT,
+ PKT_TYPE_RX_FW_MONITOR = 0x0c,
+};
+
+/* RXD DW1 */
+#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(11, 0)
+#define MT_RXD1_NORMAL_GROUP_1 BIT(16)
+#define MT_RXD1_NORMAL_GROUP_2 BIT(17)
+#define MT_RXD1_NORMAL_GROUP_3 BIT(18)
+#define MT_RXD1_NORMAL_GROUP_4 BIT(19)
+#define MT_RXD1_NORMAL_GROUP_5 BIT(20)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
+#define MT_RXD1_NORMAL_CM BIT(23)
+#define MT_RXD1_NORMAL_CLM BIT(24)
+#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
+#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
+#define MT_RXD1_NORMAL_BAND_IDX GENMASK(28, 27)
+#define MT_RXD1_NORMAL_SPP_EN BIT(29)
+#define MT_RXD1_NORMAL_ADD_OM BIT(30)
+#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
+
+/* RXD DW2 */
+#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
+#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
+#define MT_RXD2_NORMAL_HDR_TRANS BIT(7)
+#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 13)
+#define MT_RXD2_NORMAL_SEC_MODE GENMASK(20, 16)
+#define MT_RXD2_NORMAL_MU_BAR BIT(21)
+#define MT_RXD2_NORMAL_SW_BIT BIT(22)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
+#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
+
+/* RXD DW3 */
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
+#define MT_RXD3_NORMAL_U2M BIT(0)
+#define MT_RXD3_NORMAL_HTC_VLD BIT(18)
+#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
+#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
+#define MT_RXD3_NORMAL_CO_ANT BIT(22)
+#define MT_RXD3_NORMAL_FCS_ERR BIT(24)
+#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
+
+/* RXD DW4 */
+#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
+#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
+#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
+
+#define MT_RXV_HDR_BAND_IDX BIT(24)
+
+/* RXD GROUP4 */
+#define MT_RXD8_FRAME_CONTROL GENMASK(15, 0)
+
+#define MT_RXD10_SEQ_CTRL GENMASK(15, 0)
+#define MT_RXD10_QOS_CTL GENMASK(31, 16)
+
+#define MT_RXD11_HT_CONTROL GENMASK(31, 0)
+
+/* P-RXV */
+#define MT_PRXV_TX_RATE GENMASK(6, 0)
+#define MT_PRXV_TX_DCM BIT(4)
+#define MT_PRXV_TX_ER_SU_106T BIT(5)
+#define MT_PRXV_NSTS GENMASK(10, 7)
+#define MT_PRXV_TXBF BIT(11)
+#define MT_PRXV_HT_AD_CODE BIT(12)
+#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
+#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
+#define MT_PRXV_RCPI3 GENMASK(31, 24)
+#define MT_PRXV_RCPI2 GENMASK(23, 16)
+#define MT_PRXV_RCPI1 GENMASK(15, 8)
+#define MT_PRXV_RCPI0 GENMASK(7, 0)
+#define MT_PRXV_HT_SHORT_GI GENMASK(4, 3)
+#define MT_PRXV_HT_STBC GENMASK(10, 9)
+#define MT_PRXV_TX_MODE GENMASK(14, 11)
+#define MT_PRXV_FRAME_MODE GENMASK(2, 0)
+#define MT_PRXV_DCM BIT(5)
+#define MT_PRXV_NUM_RX GENMASK(8, 6)
+
+/* C-RXV */
+#define MT_CRXV_HT_STBC GENMASK(1, 0)
+#define MT_CRXV_TX_MODE GENMASK(7, 4)
+#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
+#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
+#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
+#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
+#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
+#define MT_CRXV_HE_UPLINK BIT(31)
+#define MT_CRXV_HE_RU0 GENMASK(7, 0)
+#define MT_CRXV_HE_RU1 GENMASK(15, 8)
+#define MT_CRXV_HE_RU2 GENMASK(23, 16)
+#define MT_CRXV_HE_RU3 GENMASK(31, 24)
+
+#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
+
+#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
+#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
+#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
+
+#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
+#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
+#define MT_CRXV_HE_BEAM_CHNG BIT(13)
+#define MT_CRXV_HE_DOPPLER BIT(16)
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0x20,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x3e
+};
+
+enum tx_mgnt_type {
+ MT_TX_NORMAL,
+ MT_TX_TIMING,
+ MT_TX_ADDBA,
+};
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+#define MT_CT_INFO_FROM_HOST BIT(7)
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_Q_IDX GENMASK(31, 25)
+#define MT_TXD0_PKT_FMT GENMASK(24, 23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_FIXED_RATE BIT(31)
+#define MT_TXD1_OWN_MAC GENMASK(30, 25)
+#define MT_TXD1_TID GENMASK(24, 21)
+#define MT_TXD1_BIP BIT(24)
+#define MT_TXD1_ETH_802_3 BIT(20)
+#define MT_TXD1_HDR_INFO GENMASK(20, 16)
+#define MT_TXD1_HDR_FORMAT GENMASK(15, 14)
+#define MT_TXD1_TGID GENMASK(13, 12)
+#define MT_TXD1_WLAN_IDX GENMASK(11, 0)
+
+#define MT_TXD2_POWER_OFFSET GENMASK(31, 26)
+#define MT_TXD2_MAX_TX_TIME GENMASK(25, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_HDR_PAD GENMASK(11, 10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_OWN_MAC_MAP BIT(8)
+#define MT_TXD2_BF_TYPE GENMASK(7, 6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SW_POWER_MGMT BIT(29)
+#define MT_TXD3_BA_DISABLE BIT(28)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_HW_AMSDU BIT(5)
+#define MT_TXD3_BCM BIT(4)
+#define MT_TXD3_EEOSP BIT(3)
+#define MT_TXD3_EMRD BIT(2)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_FL BIT(15)
+#define MT_TXD5_BYPASS_TBB BIT(14)
+#define MT_TXD5_BYPASS_RBB BIT(13)
+#define MT_TXD5_BSS_COLOR_ZERO BIT(12)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_TX_SRC GENMASK(31, 30)
+#define MT_TXD6_VTA BIT(28)
+#define MT_TXD6_FIXED_BW BIT(25)
+#define MT_TXD6_BW GENMASK(24, 22)
+#define MT_TXD6_TX_RATE GENMASK(21, 16)
+#define MT_TXD6_TIMESTAMP_OFS_EN BIT(15)
+#define MT_TXD6_TIMESTAMP_OFS_IDX GENMASK(14, 10)
+#define MT_TXD6_MSDU_CNT GENMASK(9, 4)
+#define MT_TXD6_SPE_ID_IDX BIT(10)
+#define MT_TXD6_ANT_ID GENMASK(7, 4)
+#define MT_TXD6_DIS_MAT BIT(3)
+#define MT_TXD6_DAS BIT(2)
+#define MT_TXD6_AMSDU_CAP BIT(1)
+
+#define MT_TXD7_TXD_LEN GENMASK(31, 30)
+#define MT_TXD7_IP_SUM BIT(29)
+#define MT_TXD7_DROP_BY_SDO BIT(28)
+#define MT_TXD7_MAC_TXD BIT(27)
+#define MT_TXD7_CTXD BIT(26)
+#define MT_TXD7_CTXD_CNT GENMASK(25, 22)
+#define MT_TXD7_UDP_TCP_SUM BIT(15)
+#define MT_TXD7_TX_TIME GENMASK(9, 0)
+
+#define MT_TX_RATE_STBC BIT(13)
+#define MT_TX_RATE_NSS GENMASK(13, 10)
+#define MT_TX_RATE_MODE GENMASK(9, 6)
+#define MT_TX_RATE_SU_EXT_TONE BIT(5)
+#define MT_TX_RATE_DCM BIT(4)
+/* VHT/HE only use bits 0-3 */
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+struct mt7996_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ __le16 rept_wds_wcid;
+ u8 nbuf;
+#define MT_TXP_MAX_BUF_NUM 6
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed __aligned(4);
+
+#define MT_TXFREE0_PKT_TYPE GENMASK(31, 27)
+#define MT_TXFREE0_MSDU_CNT GENMASK(25, 16)
+#define MT_TXFREE0_RX_BYTE GENMASK(15, 0)
+
+#define MT_TXFREE1_VER GENMASK(18, 16)
+
+#define MT_TXFREE_INFO_PAIR BIT(31)
+#define MT_TXFREE_INFO_HEADER BIT(30)
+#define MT_TXFREE_INFO_WLAN_ID GENMASK(23, 12)
+#define MT_TXFREE_INFO_MSDU_ID GENMASK(14, 0)
+
+#define MT_TXS0_BW GENMASK(31, 29)
+#define MT_TXS0_TID GENMASK(28, 26)
+#define MT_TXS0_AMPDU BIT(25)
+#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TX_RATE GENMASK(13, 0)
+
+#define MT_TXS1_SEQNO GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE GENMASK(19, 16)
+#define MT_TXS1_RXV_SEQNO GENMASK(15, 8)
+#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0)
+
+#define MT_TXS2_BF_STATUS GENMASK(31, 30)
+#define MT_TXS2_BAND GENMASK(29, 28)
+#define MT_TXS2_WCID GENMASK(27, 16)
+#define MT_TXS2_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS3_PID GENMASK(31, 24)
+#define MT_TXS3_RATE_STBC BIT(7)
+#define MT_TXS3_FIXED_RATE BIT(6)
+#define MT_TXS3_SRC GENMASK(5, 4)
+#define MT_TXS3_SHARED_ANTENNA BIT(3)
+#define MT_TXS3_LAST_TX_RATE GENMASK(2, 0)
+
+#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
+
+#define MT_TXS5_F0_FINAL_MPDU BIT(31)
+#define MT_TXS5_F0_QOS BIT(30)
+#define MT_TXS5_F0_TX_COUNT GENMASK(29, 25)
+#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS5_F1_MPDU_TX_COUNT GENMASK(31, 24)
+#define MT_TXS5_F1_MPDU_TX_BYTES GENMASK(23, 0)
+
+#define MT_TXS6_F0_NOISE_3 GENMASK(31, 24)
+#define MT_TXS6_F0_NOISE_2 GENMASK(23, 16)
+#define MT_TXS6_F0_NOISE_1 GENMASK(15, 8)
+#define MT_TXS6_F0_NOISE_0 GENMASK(7, 0)
+#define MT_TXS6_F1_MPDU_FAIL_COUNT GENMASK(31, 24)
+#define MT_TXS6_F1_MPDU_FAIL_BYTES GENMASK(23, 0)
+
+#define MT_TXS7_F0_RCPI_3 GENMASK(31, 24)
+#define MT_TXS7_F0_RCPI_2 GENMASK(23, 16)
+#define MT_TXS7_F0_RCPI_1 GENMASK(15, 8)
+#define MT_TXS7_F0_RCPI_0 GENMASK(7, 0)
+#define MT_TXS7_F1_MPDU_RETRY_COUNT GENMASK(31, 24)
+#define MT_TXS7_F1_MPDU_RETRY_BYTES GENMASK(23, 0)
+
+struct mt7996_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dbm */
+ int min_pwr; /* dbm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7996_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 max_pw;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ u8 rsv[2];
+ u32 min_stgpr_diff;
+} __packed;
+
+struct mt7996_dfs_radar_spec {
+ struct mt7996_dfs_pulse pulse_th;
+ struct mt7996_dfs_pattern radar_pattern[16];
+};
+
+static inline struct mt7996_txp *
+mt7996_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ u8 *txwi;
+
+ if (!t)
+ return NULL;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ return (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
new file mode 100644
index 000000000000..4421cd54311b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -0,0 +1,1334 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include "mt7996.h"
+#include "mcu.h"
+
+static bool mt7996_dev_running(struct mt7996_dev *dev)
+{
+ struct mt7996_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7996_phy2(dev);
+ if (phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return true;
+
+ phy = mt7996_phy3(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
+
+static int mt7996_start(struct ieee80211_hw *hw)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ bool running;
+ int ret;
+
+ flush_work(&dev->init_work);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ running = mt7996_dev_running(dev);
+ if (!running) {
+ ret = mt7996_mcu_set_hdr_trans(dev, true);
+ if (ret)
+ goto out;
+ }
+
+ mt7996_mac_enable_nf(dev, phy->mt76->band_idx);
+
+ ret = mt7996_mcu_set_rts_thresh(phy, 0x92b);
+ if (ret)
+ goto out;
+
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
+ if (ret)
+ goto out;
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ ieee80211_iterate_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_mcu_set_pm, dev->mt76.hw);
+
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7996_WATCHDOG_TIME);
+
+ if (!running)
+ mt7996_mac_reset_counters(phy);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void mt7996_stop(struct ieee80211_hw *hw)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ ieee80211_iterate_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_mcu_set_pm, dev->mt76.hw);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
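+/* return the 1-based index of the first free bit in [start, end], or 0 if none is free */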
+static inline int get_free_idx(u32 mask, u8 start, u8 end)
+{
+ return ffs(~mask & GENMASK(end, start));
+}
+
+static int get_omac_idx(enum nl80211_iftype type, u64 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ /* prefer hw bssid slot 1-3 */
+ i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
+ if (i)
+ return i - 1;
+
+ if (type != NL80211_IFTYPE_STATION)
+ break;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
+
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP:
+ /* ap uses hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -1;
+}
+
+static void mt7996_init_bitrate_mask(struct ieee80211_vif *vif)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
+ mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
+ mvif->bitrate_mask.control[i].he_gi = 0xff;
+ mvif->bitrate_mask.control[i].he_ltf = 0xff;
+ mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
+ memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(mvif->bitrate_mask.control[i].ht_mcs));
+ memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(mvif->bitrate_mask.control[i].vht_mcs));
+ memset(mvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(mvif->bitrate_mask.control[i].he_mcs));
+ }
+}
+
+static int mt7996_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt76_txq *mtxq;
+ u8 band_idx = phy->mt76->band_idx;
+ int idx, ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ phy->monitor_vif = vif;
+
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mvif->mt76.idx >= (MT7996_MAX_INTERFACES << dev->dbdc_support)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ idx = get_omac_idx(vif->type, phy->omac_mask);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->mt76.omac_idx = idx;
+ mvif->phy = phy;
+ mvif->mt76.band_idx = band_idx;
+ mvif->mt76.wmm_idx = band_idx;
+
+ ret = mt7996_mcu_add_dev_info(phy, vif, true);
+ if (ret)
+ goto out;
+
+ ret = mt7996_mcu_set_radio_en(phy, true);
+ if (ret)
+ goto out;
+
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
+ phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+
+ idx = MT7996_WTBL_RESERVED - mvif->mt76.idx;
+
+ INIT_LIST_HEAD(&mvif->sta.rc_list);
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.phy_idx = band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_packet_id_init(&mvif->sta.wcid);
+
+ mt7996_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = idx;
+ }
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3))
+ vif->offload_flags = 0;
+ vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+
+ mt7996_init_bitrate_mask(vif);
+ memset(&mvif->cap, -1, sizeof(mvif->cap));
+
+ mt7996_mcu_add_bss_info(phy, vif, true);
+ mt7996_mcu_add_sta(dev, vif, NULL, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void mt7996_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta = &mvif->sta;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ int idx = msta->wcid.idx;
+
+ mt7996_mcu_add_bss_info(phy, vif, false);
+ mt7996_mcu_add_sta(dev, vif, NULL, false);
+
+ if (vif == phy->monitor_vif)
+ phy->monitor_vif = NULL;
+
+ mt7996_mcu_add_dev_info(phy, vif, false);
+ mt7996_mcu_set_radio_en(phy, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
+ phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
+ mutex_unlock(&dev->mt76.mutex);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt76_packet_id_flush(&dev->mt76, &msta->wcid);
+}
+
+int mt7996_set_channel(struct mt7996_phy *phy)
+{
+ struct mt7996_dev *dev = phy->dev;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &phy->mt76->state);
+
+ mt76_set_channel(phy->mt76);
+
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_SWITCH);
+ if (ret)
+ goto out;
+
+ mt7996_mac_set_timing(phy);
+ ret = mt7996_dfs_init_radar_detector(phy);
+ mt7996_mac_cca_stats_reset(phy);
+
+ mt7996_mac_reset_counters(phy);
+ phy->noise = 0;
+
+out:
+ clear_bit(MT76_RESET, &phy->mt76->state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_txq_schedule_all(phy->mt76);
+
+ ieee80211_queue_delayed_work(phy->mt76->hw,
+ &phy->mt76->mac_work,
+ MT7996_WATCHDOG_TIME);
+
+ return ret;
+}
+
+static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ u8 *wcid_keyidx = &wcid->hw_key_idx;
+ int idx = key->keyidx;
+ int err = 0;
+
+ /* The hardware does not support per-STA RX GTK; fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_SMS4:
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(phy, vif, true);
+ }
+
+ if (cmd == SET_KEY)
+ *wcid_keyidx = idx;
+ else if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ else
+ goto out;
+
+ mt76_wcid_key_setup(&dev->mt76, wcid,
+ cmd == SET_KEY ? key : NULL);
+
+ err = mt7996_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
+static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ int ret;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt7996_set_channel(phy);
+ if (ret)
+ return ret;
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ if (!enabled)
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_rmw_field(dev, MT_DMA_DCR0(phy->mt76->band_idx),
+ MT_DMA_DCR0_RXD_G5_EN, enabled);
+ mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+
+ /* no need to update right away, we'll get BSS_CHANGED_QOS */
+ queue = mt76_connac_lmac_mapping(queue);
+ mvif->queue_params[queue] = *params;
+
+ return 0;
+}
+
+static void mt7996_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 flags = 0;
+
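+/* set the hardware drop bits for frame classes the stack did not request */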
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1(phy->mt76->band_idx), ctl_flags);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7996_update_bss_color(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *bss_color)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP: {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+
+ if (mvif->mt76.omac_idx > HW_BSSID_MAX)
+ return;
+ fallthrough;
+ }
+ case NL80211_IFTYPE_STATION:
+ mt7996_mcu_update_bss_color(dev, vif, bss_color);
+ break;
+ default:
+ break;
+ }
+}
+
+static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ /* station mode uses the BSSID to map the wlan entry to a peer;
+ * the peer then references bss_info_rfch to set the bandwidth cap.
+ */
+ if (changed & BSS_CHANGED_BSSID &&
+ vif->type == NL80211_IFTYPE_STATION) {
+ bool join = !is_zero_ether_addr(info->bssid);
+
+ mt7996_mcu_add_bss_info(phy, vif, join);
+ mt7996_mcu_add_sta(dev, vif, NULL, join);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7996_mcu_add_bss_info(phy, vif, vif->cfg.assoc);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt7996_mac_enable_rtscts(dev, vif, info->use_cts_prot);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7996_mac_set_timing(phy);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
+ mt7996_mcu_add_bss_info(phy, vif, true);
+ mt7996_mcu_add_sta(dev, vif, NULL, true);
+ }
+
+ /* ensure that txcmd_mode is enabled after bss_info */
+ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ mt7996_mcu_set_tx(dev, vif);
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ mt7996_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
+
+ if (changed & BSS_CHANGED_HE_BSS_COLOR)
+ mt7996_update_bss_color(hw, vif, &info->he_bss_color);
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7996_mcu_add_beacon(hw, vif, info->enable_beacon);
+
+ if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+ changed & BSS_CHANGED_FILS_DISCOVERY)
+ mt7996_mcu_beacon_inband_discov(dev, vif, changed);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7996_mcu_add_beacon(hw, vif, true);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ u8 band_idx = mvif->phy->mt76->band_idx;
+ int ret, idx;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
+ if (idx < 0)
+ return -ENOSPC;
+
+ INIT_LIST_HEAD(&msta->rc_list);
+ INIT_LIST_HEAD(&msta->poll_list);
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.phy_idx = band_idx;
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->jiffies = jiffies;
+
+ ewma_avg_signal_init(&msta->avg_ack_signal);
+
+ mt7996_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ ret = mt7996_mcu_add_sta(dev, vif, sta, true);
+ if (ret)
+ return ret;
+
+ return mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
+}
+
+void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ int i;
+
+ mt7996_mcu_add_sta(dev, vif, sta, false);
+
+ mt7996_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7996_mac_twt_teardown_flow(dev, msta, i);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ if (!list_empty(&msta->rc_list))
+ list_del_init(&msta->rc_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void mt7996_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+ if (control->sta) {
+ struct mt7996_sta *sta;
+
+ sta = (struct mt7996_sta *)control->sta->drv_priv;
+ wcid = &sta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7996_vif *mvif;
+
+ mvif = (struct mt7996_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(mphy, control->sta, wcid, skb);
+}
+
+static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ int ret;
+
+ mutex_lock(&phy->dev->mt76.mutex);
+ ret = mt7996_mcu_set_rts_thresh(phy, val);
+ mutex_unlock(&phy->dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ params->buf_size);
+ ret = mt7996_mcu_add_rx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ ret = mt7996_mcu_add_rx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ret = mt7996_mcu_add_tx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, false);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7996_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7996_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7996_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+ u16 n;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
+ /* TSF software read */
+ mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(phy->mt76->band_idx));
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(phy->mt76->band_idx));
+
+ return tsf.t64;
+}
+
+static u64
+mt7996_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ u64 ret;
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = __mt7996_get_tsf(hw, mvif);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void
+mt7996_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 timestamp)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
+ /* TSF software overwrite */
+ mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 timestamp)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
+ /* TSF software adjust */
+ mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7996_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = phy->dev;
+
+ mutex_lock(&dev->mt76.mutex);
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7996_mac_set_timing(phy);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static int
+mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ u8 band_idx = phy->mt76->band_idx, shift = dev->chainshift[band_idx];
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->mt76->antenna_mask = tx_ant;
+
+ /* restore the original chainmask, which may include an auxiliary path */
+ if (hweight8(tx_ant) == max_nss)
+ phy->mt76->chainmask = (dev->chainmask >> shift) << shift;
+ else
+ phy->mt76->chainmask = tx_ant << shift;
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7996_set_stream_vht_txbf_caps(phy);
+ mt7996_set_stream_he_caps(phy);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7996_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct rate_info *txrate = &msta->wcid.rate;
+
+ if (!txrate->legacy && !txrate->flags)
+ return;
+
+ if (txrate->legacy) {
+ sinfo->txrate.legacy = txrate->legacy;
+ } else {
+ sinfo->txrate.mcs = txrate->mcs;
+ sinfo->txrate.nss = txrate->nss;
+ sinfo->txrate.bw = txrate->bw;
+ sinfo->txrate.he_gi = txrate->he_gi;
+ sinfo->txrate.he_dcm = txrate->he_dcm;
+ sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
+ }
+ sinfo->txrate.flags = txrate->flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
+
+ sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
+}
+
+static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = msta->vif->phy->dev;
+ u32 *changed = data;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ msta->changed |= *changed;
+ if (list_empty(&msta->rc_list))
+ list_add_tail(&msta->rc_list, &dev->sta_rc_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void mt7996_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = phy->dev;
+
+ mt7996_sta_rc_work(&changed, sta);
+ ieee80211_queue_work(hw, &dev->rc_work);
+}
+
+static int
+mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = phy->dev;
+ u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+
+ mvif->bitrate_mask = *mask;
+
+ /* If multiple rates across different preambles are given, we
+ * reconfigure this info for all peers via the sta_rec command,
+ * with the following exceptions:
+ * - single rate: if a rate is passed along with different preambles,
+ *   we select the highest one as the fixed rate, i.e. the VHT MCS
+ *   for VHT peers.
+ * - multiple rates: if the mask is not in range format, i.e.
+ *   0-{7,8,9} for VHT, then setting multiple individual MCS values
+ *   (e.g. MCS 4,5,6) is not supported.
+ */
+ ieee80211_iterate_stations_atomic(hw, mt7996_sta_rc_work, &changed);
+ ieee80211_queue_work(hw, &dev->rc_work);
+
+ return 0;
+}
+
+static void mt7996_sta_set_4addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enabled)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+
+ mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+}
+
+static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enabled)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+
+ mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+}
+
+static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_ampdu_cnt",
+ "tx_stop_q_empty_cnt",
+ "tx_mpdu_attempts",
+ "tx_mpdu_success",
+ "tx_rwp_fail_cnt",
+ "tx_rwp_need_cnt",
+ "tx_pkt_ebf_cnt",
+ "tx_pkt_ibf_cnt",
+ "tx_ampdu_len:0-1",
+ "tx_ampdu_len:2-10",
+ "tx_ampdu_len:11-19",
+ "tx_ampdu_len:20-28",
+ "tx_ampdu_len:29-37",
+ "tx_ampdu_len:38-46",
+ "tx_ampdu_len:47-55",
+ "tx_ampdu_len:56-79",
+ "tx_ampdu_len:80-103",
+ "tx_ampdu_len:104-127",
+ "tx_ampdu_len:128-151",
+ "tx_ampdu_len:152-175",
+ "tx_ampdu_len:176-199",
+ "tx_ampdu_len:200-223",
+ "tx_ampdu_len:224-247",
+ "ba_miss_count",
+ "tx_beamformer_ppdu_iBF",
+ "tx_beamformer_ppdu_eBF",
+ "tx_beamformer_rx_feedback_all",
+ "tx_beamformer_rx_feedback_he",
+ "tx_beamformer_rx_feedback_vht",
+ "tx_beamformer_rx_feedback_ht",
+ "tx_beamformer_rx_feedback_bw", /* zero based idx: 20, 40, 80, 160 */
+ "tx_beamformer_rx_feedback_nc",
+ "tx_beamformer_rx_feedback_nr",
+ "tx_beamformee_ok_feedback_pkts",
+ "tx_beamformee_feedback_trig",
+ "tx_mu_beamforming",
+ "tx_mu_mpdu",
+ "tx_mu_successful_mpdu",
+ "tx_su_successful_mpdu",
+ "tx_msdu_pack_1",
+ "tx_msdu_pack_2",
+ "tx_msdu_pack_3",
+ "tx_msdu_pack_4",
+ "tx_msdu_pack_5",
+ "tx_msdu_pack_6",
+ "tx_msdu_pack_7",
+ "tx_msdu_pack_8",
+
+ /* rx counters */
+ "rx_fifo_full_cnt",
+ "rx_mpdu_cnt",
+ "channel_idle_cnt",
+ "rx_vector_mismatch_cnt",
+ "rx_delimiter_fail_cnt",
+ "rx_len_mismatch_cnt",
+ "rx_ampdu_cnt",
+ "rx_ampdu_bytes_cnt",
+ "rx_ampdu_valid_subframe_cnt",
+ "rx_ampdu_valid_subframe_b_cnt",
+ "rx_pfdrop_cnt",
+ "rx_vec_queue_overflow_drop_cnt",
+ "rx_ba_cnt",
+
+ /* per vif counters */
+ "v_tx_mode_cck",
+ "v_tx_mode_ofdm",
+ "v_tx_mode_ht",
+ "v_tx_mode_ht_gf",
+ "v_tx_mode_vht",
+ "v_tx_mode_he_su",
+ "v_tx_mode_he_ext_su",
+ "v_tx_mode_he_tb",
+ "v_tx_mode_he_mu",
+ "v_tx_bw_20",
+ "v_tx_bw_40",
+ "v_tx_bw_80",
+ "v_tx_bw_160",
+ "v_tx_mcs_0",
+ "v_tx_mcs_1",
+ "v_tx_mcs_2",
+ "v_tx_mcs_3",
+ "v_tx_mcs_4",
+ "v_tx_mcs_5",
+ "v_tx_mcs_6",
+ "v_tx_mcs_7",
+ "v_tx_mcs_8",
+ "v_tx_mcs_9",
+ "v_tx_mcs_10",
+ "v_tx_mcs_11",
+};
+
+#define MT7996_SSTATS_LEN ARRAY_SIZE(mt7996_gstrings_stats)
+
+/* Ethtool related API */
+static
+void mt7996_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *mt7996_gstrings_stats,
+ sizeof(mt7996_gstrings_stats));
+}
+
+static
+int mt7996_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return MT7996_SSTATS_LEN;
+
+ return 0;
+}
+
+static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
+{
+ struct mt76_ethtool_worker_info *wi = wi_data;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+
+ if (msta->vif->mt76.idx != wi->idx)
+ return;
+
+ mt76_ethtool_worker(wi, &msta->stats);
+}
+
+static
+void mt7996_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_ethtool_worker_info wi = {
+ .data = data,
+ .idx = mvif->mt76.idx,
+ };
+ struct mib_stats *mib = &phy->mib;
+ /* See mt7996_ampdu_stat_read_phy, etc */
+ int i, ei = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7996_mac_update_stats(phy);
+
+ data[ei++] = mib->tx_ampdu_cnt;
+ data[ei++] = mib->tx_stop_q_empty_cnt;
+ data[ei++] = mib->tx_mpdu_attempts_cnt;
+ data[ei++] = mib->tx_mpdu_success_cnt;
+ data[ei++] = mib->tx_rwp_fail_cnt;
+ data[ei++] = mib->tx_rwp_need_cnt;
+ data[ei++] = mib->tx_bf_ebf_ppdu_cnt;
+ data[ei++] = mib->tx_bf_ibf_ppdu_cnt;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < 15 /* ARRAY_SIZE(bound) */; i++)
+ data[ei++] = phy->mt76->aggr_stats[i];
+ data[ei++] = phy->mib.ba_miss_cnt;
+
+ /* Tx Beamformer monitor */
+ data[ei++] = mib->tx_bf_ibf_ppdu_cnt;
+ data[ei++] = mib->tx_bf_ebf_ppdu_cnt;
+
+ /* Tx Beamformer Rx feedback monitor */
+ data[ei++] = mib->tx_bf_rx_fb_all_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_he_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_vht_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_ht_cnt;
+
+ data[ei++] = mib->tx_bf_rx_fb_bw;
+ data[ei++] = mib->tx_bf_rx_fb_nc_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_nr_cnt;
+
+ /* Tx Beamformee Rx NDPA & Tx feedback report */
+ data[ei++] = mib->tx_bf_fb_cpl_cnt;
+ data[ei++] = mib->tx_bf_fb_trig_cnt;
+
+ /* Tx SU & MU counters */
+ data[ei++] = mib->tx_mu_bf_cnt;
+ data[ei++] = mib->tx_mu_mpdu_cnt;
+ data[ei++] = mib->tx_mu_acked_mpdu_cnt;
+ data[ei++] = mib->tx_su_acked_mpdu_cnt;
+
+ /* Tx amsdu info (pack-count histogram) */
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++)
+ data[ei++] = mib->tx_amsdu[i];
+
+ /* rx counters */
+ data[ei++] = mib->rx_fifo_full_cnt;
+ data[ei++] = mib->rx_mpdu_cnt;
+ data[ei++] = mib->channel_idle_cnt;
+ data[ei++] = mib->rx_vector_mismatch_cnt;
+ data[ei++] = mib->rx_delimiter_fail_cnt;
+ data[ei++] = mib->rx_len_mismatch_cnt;
+ data[ei++] = mib->rx_ampdu_cnt;
+ data[ei++] = mib->rx_ampdu_bytes_cnt;
+ data[ei++] = mib->rx_ampdu_valid_subframe_cnt;
+ data[ei++] = mib->rx_ampdu_valid_subframe_bytes_cnt;
+ data[ei++] = mib->rx_pfdrop_cnt;
+ data[ei++] = mib->rx_vec_queue_overflow_drop_cnt;
+ data[ei++] = mib->rx_ba_cnt;
+
+ /* Add values for all stations owned by this vif */
+ wi.initial_stat_idx = ei;
+ ieee80211_iterate_stations_atomic(hw, mt7996_ethtool_worker, &wi);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (wi.sta_count == 0)
+ return;
+
+ ei += wi.worker_stat_count;
+ if (ei != MT7996_SSTATS_LEN)
+ dev_err(dev->mt76.dev, "ei: %d MT7996_SSTATS_LEN: %d",
+ ei, (int)MT7996_SSTATS_LEN);
+}
+
+static void
+mt7996_twt_teardown_request(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 flowid)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7996_mac_twt_teardown_flow(dev, msta, flowid);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static int
+mt7996_set_radar_background(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_dev *dev = phy->dev;
+ int ret = -EINVAL;
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ goto out;
+
+ if (dev->rdd2_phy && dev->rdd2_phy != phy) {
+ /* rdd2 is already locked */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* rdd2 already configured on a radar channel */
+ running = dev->rdd2_phy &&
+ cfg80211_chandef_valid(&dev->rdd2_chandef) &&
+ !!(dev->rdd2_chandef.chan->flags & IEEE80211_CHAN_RADAR);
+
+ if (!chandef || running ||
+ !(chandef->chan->flags & IEEE80211_CHAN_RADAR)) {
+ ret = mt7996_mcu_rdd_background_enable(phy, NULL);
+ if (ret)
+ goto out;
+
+ if (!running)
+ goto update_phy;
+ }
+
+ ret = mt7996_mcu_rdd_background_enable(phy, chandef);
+ if (ret)
+ goto out;
+
+update_phy:
+ dev->rdd2_phy = chandef ? phy : NULL;
+ if (chandef)
+ dev->rdd2_chandef = *chandef;
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+const struct ieee80211_ops mt7996_ops = {
+ .tx = mt7996_tx,
+ .start = mt7996_start,
+ .stop = mt7996_stop,
+ .add_interface = mt7996_add_interface,
+ .remove_interface = mt7996_remove_interface,
+ .config = mt7996_config,
+ .conf_tx = mt7996_conf_tx,
+ .configure_filter = mt7996_configure_filter,
+ .bss_info_changed = mt7996_bss_info_changed,
+ .sta_add = mt7996_sta_add,
+ .sta_remove = mt7996_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .sta_rc_update = mt7996_sta_rc_update,
+ .set_key = mt7996_set_key,
+ .ampdu_action = mt7996_ampdu_action,
+ .set_rts_threshold = mt7996_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76_sw_scan_complete,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_txpower = mt76_get_txpower,
+ .channel_switch_beacon = mt7996_channel_switch_beacon,
+ .get_stats = mt7996_get_stats,
+ .get_et_sset_count = mt7996_get_et_sset_count,
+ .get_et_stats = mt7996_get_et_stats,
+ .get_et_strings = mt7996_get_et_strings,
+ .get_tsf = mt7996_get_tsf,
+ .set_tsf = mt7996_set_tsf,
+ .offset_tsf = mt7996_offset_tsf,
+ .get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
+ .set_antenna = mt7996_set_antenna,
+ .set_bitrate_mask = mt7996_set_bitrate_mask,
+ .set_coverage_class = mt7996_set_coverage_class,
+ .sta_statistics = mt7996_sta_statistics,
+ .sta_set_4addr = mt7996_sta_set_4addr,
+ .sta_set_decap_offload = mt7996_sta_set_decap_offload,
+ .add_twt_setup = mt7996_mac_add_twt_setup,
+ .twt_teardown_request = mt7996_twt_teardown_request,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = mt7996_sta_add_debugfs,
+#endif
+ .set_radar_background = mt7996_set_radar_background,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
new file mode 100644
index 000000000000..04e1d10bbd21
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -0,0 +1,3607 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include "mt7996.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
+struct mt7996_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+ u16 reserved;
+ struct {
+ __be32 patch_ver;
+ __be32 subsys;
+ __be32 feature;
+ __be32 n_region;
+ __be32 crc;
+ u32 reserved[11];
+ } desc;
+} __packed;
+
+struct mt7996_patch_sec {
+ __be32 type;
+ __be32 offs;
+ __be32 size;
+ union {
+ __be32 spec[13];
+ struct {
+ __be32 addr;
+ __be32 len;
+ __be32 sec_key_idx;
+ __be32 align_len;
+ u32 reserved[9];
+ } info;
+ };
+} __packed;
+
+struct mt7996_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 reserved[2];
+ char fw_ver[10];
+ char build_date[15];
+ u32 crc;
+} __packed;
+
+struct mt7996_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 reserved[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 reserved1[15];
+} __packed;
+
+#define MCU_PATCH_ADDRESS 0x200000
+
+#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
+#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+
+static bool sr_scene_detect = true;
+module_param(sr_scene_detect, bool, 0644);
+MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm");
+
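+/* return the highest supported spatial stream (zero-based) encoded in an MCS map */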
+static u8
+mt7996_mcu_get_sta_nss(u16 mcs_map)
+{
+ u8 nss;
+
+ for (nss = 8; nss > 0; nss--) {
+ u8 nss_mcs = (mcs_map >> (2 * (nss - 1))) & 3;
+
+ if (nss_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ break;
+ }
+
+ return nss - 1;
+}
+
+static void
+mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
+ u16 mcs_map)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+
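+ /* cap each stream's HE MCS range with the user-configured bitrate mask */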
+ for (nss = 0; nss < max_nss; nss++) {
+ int mcs;
+
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ mcs = GENMASK(11, 0);
+ break;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ mcs = GENMASK(7, 0);
+ break;
+ default:
+ mcs = 0;
+ }
+
+ mcs = mcs ? fls(mcs & mask[nss]) - 1 : -1;
+
+ switch (mcs) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8 ... 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10 ... 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
+ mcs_map &= ~(0x3 << (nss * 2));
+ mcs_map |= mcs << (nss * 2);
+ }
+
+ *he_mcs = cpu_to_le16(mcs_map);
+}
+
+static void
+mt7996_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
+ const u16 *mask)
+{
+ u16 mcs, mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+
+ for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
+ switch (mcs_map & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ mcs = GENMASK(8, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ mcs = GENMASK(7, 0);
+ break;
+ default:
+ mcs = 0;
+ }
+
+ vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
+ }
+}
+
+static void
+mt7996_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
+ const u8 *mask)
+{
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+
+ for (nss = 0; nss < max_nss; nss++)
+ ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
+}
+
+static int
+mt7996_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq)
+{
+ struct mt7996_mcu_rxd *rxd;
+ struct mt7996_mcu_uni_event *event;
+ int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
+ int ret = 0;
+
+ if (!skb) {
+ dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ rxd = (struct mt7996_mcu_rxd *)skb->data;
+ if (seq != rxd->seq)
+ return -EAGAIN;
+
+ if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
+ skb_pull(skb, sizeof(*rxd) - 4);
+ ret = *skb->data;
+ } else if ((rxd->option & MCU_UNI_CMD_EVENT) &&
+ rxd->eid == MCU_UNI_EVENT_RESULT) {
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7996_mcu_uni_event *)skb->data;
+ ret = le32_to_cpu(event->status);
+ /* skip invalid event */
+ if (mcu_cmd != event->cid)
+ ret = -EAGAIN;
+ } else {
+ skb_pull(skb, sizeof(struct mt7996_mcu_rxd));
+ }
+
+ return ret;
+}
+
+static int
+mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
+ struct mt76_connac2_mcu_uni_txd *uni_txd;
+ struct mt76_connac2_mcu_txd *mcu_txd;
+ enum mt76_mcuq_id qid;
+ __le32 *txd;
+ u32 val;
+ u8 seq;
+
+ mdev->mcu.timeout = 20 * HZ;
+
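+ /* sequence number 0 is reserved, so wrap within 1..15 */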
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+
+ if (cmd == MCU_CMD(FW_SCATTER)) {
+ qid = MT_MCUQ_FWDL;
+ goto exit;
+ }
+
+ txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd = (__le32 *)skb_push(skb, txd_len);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ qid = MT_MCUQ_WA;
+ else
+ qid = MT_MCUQ_WM;
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
+ FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
+ txd[0] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
+ txd[1] = cpu_to_le32(val);
+
+ if (cmd & __MCU_CMD_FIELD_UNI) {
+ uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
+ uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
+ uni_txd->cid = cpu_to_le16(mcu_cmd);
+ uni_txd->s2d_index = MCU_S2D_H2CN;
+ uni_txd->pkt_type = MCU_PKT_ID;
+ uni_txd->seq = seq;
+
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ uni_txd->option = MCU_CMD_UNI_QUERY_ACK;
+ else
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+
+ if ((cmd & __MCU_CMD_FIELD_WA) && (cmd & __MCU_CMD_FIELD_WM))
+ uni_txd->s2d_index = MCU_S2D_H2CN;
+ else if (cmd & __MCU_CMD_FIELD_WA)
+ uni_txd->s2d_index = MCU_S2D_H2C;
+ else if (cmd & __MCU_CMD_FIELD_WM)
+ uni_txd->s2d_index = MCU_S2D_H2N;
+
+ goto exit;
+ }
+
+ mcu_txd = (struct mt76_connac2_mcu_txd *)txd;
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
+ MT_TX_MCU_PORT_RX_Q0));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+
+ mcu_txd->cid = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
+ mcu_txd->set_query = MCU_Q_NA;
+ mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
+ if (mcu_txd->ext_cid) {
+ mcu_txd->ext_cid_ack = 1;
+
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ }
+
+ if (cmd & __MCU_CMD_FIELD_WA)
+ mcu_txd->s2d_index = MCU_S2D_H2C;
+ else
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+
+exit:
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
+}
+
+int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
+{
+ struct {
+ __le32 args[3];
+ } req = {
+ .args = {
+ cpu_to_le32(a1),
+ cpu_to_le32(a2),
+ cpu_to_le32(a3),
+ },
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false);
+}
+
+static void
+mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->bss_conf.csa_active)
+ ieee80211_csa_finish(vif);
+}
+
+static void
+mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7996_mcu_rdd_report *r;
+
+ r = (struct mt7996_mcu_rdd_report *)skb->data;
+
+ mphy = dev->mt76.phys[r->band_idx];
+ if (!mphy)
+ return;
+
+ if (r->band_idx == MT_RX_SEL2)
+ cfg80211_background_radar_event(mphy->hw->wiphy,
+ &dev->rdd2_chandef,
+ GFP_ATOMIC);
+ else
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
+static void
+mt7996_mcu_rx_log_message(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+#define UNI_EVENT_FW_LOG_FORMAT 0
+ struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
+ const char *data = (char *)&rxd[1] + 4, *type;
+ struct tlv *tlv = (struct tlv *)data;
+ int len;
+
+ if (!(rxd->option & MCU_UNI_CMD_EVENT)) {
+ len = skb->len - sizeof(*rxd);
+ data = (char *)&rxd[1];
+ goto out;
+ }
+
+ if (le16_to_cpu(tlv->tag) != UNI_EVENT_FW_LOG_FORMAT)
+ return;
+
+ data += sizeof(*tlv) + 4;
+ len = le16_to_cpu(tlv->len) - sizeof(*tlv) - 4;
+
+out:
+ switch (rxd->s2d_index) {
+ case 0:
+ if (mt7996_debugfs_rx_log(dev, data, len))
+ return;
+
+ type = "WM";
+ break;
+ case 2:
+ type = "WA";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
+}
+
+static void
+mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (!vif->bss_conf.color_change_active)
+ return;
+
+ ieee80211_color_change_finish(vif);
+}
+
+static void
+mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+#define UNI_EVENT_IE_COUNTDOWN_CSA 0
+#define UNI_EVENT_IE_COUNTDOWN_BCC 1
+ struct header {
+ u8 band;
+ u8 rsv[3];
+ };
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
+ const char *data = (char *)&rxd[1], *tail;
+ struct header *hdr = (struct header *)data;
+ struct tlv *tlv = (struct tlv *)(data + 4);
+
+ if (hdr->band && dev->mt76.phys[hdr->band])
+ mphy = dev->mt76.phys[hdr->band];
+
+ tail = skb->data + le16_to_cpu(rxd->len);
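+ /* walk the TLV list until the reported event length is exhausted */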
+ while (data + sizeof(struct tlv) < tail && le16_to_cpu(tlv->len)) {
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_IE_COUNTDOWN_CSA:
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_mcu_csa_finish, mphy->hw);
+ break;
+ case UNI_EVENT_IE_COUNTDOWN_BCC:
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_mcu_cca_finish, mphy->hw);
+ break;
+ }
+
+ data += le16_to_cpu(tlv->len);
+ tlv = (struct tlv *)data;
+ }
+}
+
+static void
+mt7996_mcu_rx_ext_event(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
+
+ switch (rxd->ext_eid) {
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7996_mcu_rx_log_message(dev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7996_mcu_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
+
+ switch (rxd->eid) {
+ case MCU_EVENT_EXT:
+ mt7996_mcu_rx_ext_event(dev, skb);
+ break;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
+static void
+mt7996_mcu_uni_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
+
+ switch (rxd->eid) {
+ case MCU_UNI_EVENT_FW_LOG_2_HOST:
+ mt7996_mcu_rx_log_message(dev, skb);
+ break;
+ case MCU_UNI_EVENT_IE_COUNTDOWN:
+ mt7996_mcu_ie_countdown(dev, skb);
+ break;
+ case MCU_UNI_EVENT_RDD_REPORT:
+ mt7996_mcu_rx_radar_detected(dev, skb);
+ break;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
+void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
+
+ if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
+ mt7996_mcu_uni_rx_unsolicited_event(dev, skb);
+ return;
+ }
+
+ /* WA still uses legacy events */
+ if (rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
+ !rxd->seq)
+ mt7996_mcu_rx_unsolicited_event(dev, skb);
+ else
+ mt76_mcu_rx_event(&dev->mt76, skb);
+}
+
+static struct tlv *
+mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
+{
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len),
+ };
+
+ ptlv = skb_put(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ return ptlv;
+}
+
+static void
+mt7996_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7996_phy *phy)
+{
+ static const u8 rlm_ch_band[] = {
+ [NL80211_BAND_2GHZ] = 1,
+ [NL80211_BAND_5GHZ] = 2,
+ [NL80211_BAND_6GHZ] = 3,
+ };
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct bss_rlm_tlv *ch;
+ struct tlv *tlv;
+ int freq1 = chandef->center_freq1;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*ch));
+
+ ch = (struct bss_rlm_tlv *)tlv;
+ ch->control_channel = chandef->chan->hw_value;
+ ch->center_chan = ieee80211_frequency_to_channel(freq1);
+ ch->bw = mt76_connac_chan_bw(chandef);
+ ch->tx_streams = hweight8(phy->mt76->antenna_mask);
+ ch->rx_streams = hweight8(phy->mt76->antenna_mask);
+ ch->band = rlm_ch_band[chandef->chan->band];
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ ch->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ }
+}
+
+static void
+mt7996_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7996_phy *phy)
+{
+ struct bss_ra_tlv *ra;
+ struct tlv *tlv;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_RA, sizeof(*ra));
+
+ ra = (struct bss_ra_tlv *)tlv;
+ ra->short_preamble = true;
+}
+
+static void
+mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7996_phy *phy)
+{
+#define DEFAULT_HE_PE_DURATION 4
+#define DEFAULT_HE_DURATION_RTS_THRES 1023
+ const struct ieee80211_sta_he_cap *cap;
+ struct bss_info_uni_he *he;
+ struct tlv *tlv;
+
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he));
+
+ he = (struct bss_info_uni_he *)tlv;
+ he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ if (!he->he_pe_duration)
+ he->he_pe_duration = DEFAULT_HE_PE_DURATION;
+
+ he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ if (!he->he_rts_thres)
+ he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
+
+ he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
+ he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+static void
+mt7996_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7996_phy *phy)
+{
+ struct bss_rate_tlv *bmc;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct tlv *tlv;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_RATE, sizeof(*bmc));
+
+ bmc = (struct bss_rate_tlv *)tlv;
+ if (band == NL80211_BAND_2GHZ) {
+ bmc->short_preamble = true;
+ } else {
+ bmc->bc_trans = cpu_to_le16(0x8080);
+ bmc->mc_trans = cpu_to_le16(0x8080);
+ bmc->bc_fixed_rate = 1;
+ bmc->mc_fixed_rate = 1;
+ bmc->short_preamble = 1;
+ }
+}
+
+static void
+mt7996_mcu_bss_txcmd_tlv(struct sk_buff *skb, bool en)
+{
+ struct bss_txcmd_tlv *txcmd;
+ struct tlv *tlv;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_TXCMD, sizeof(*txcmd));
+
+ txcmd = (struct bss_txcmd_tlv *)tlv;
+ txcmd->txcmd_mode = en;
+}
+
+static void
+mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct bss_mld_tlv *mld;
+ struct tlv *tlv;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld));
+
+ mld = (struct bss_mld_tlv *)tlv;
+ mld->group_mld_id = 0xff;
+ mld->own_mld_id = mvif->mt76.idx;
+ mld->remap_idx = 0xff;
+}
+
+static void
+mt7996_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct bss_sec_tlv *sec;
+ struct tlv *tlv;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_SEC, sizeof(*sec));
+
+ sec = (struct bss_sec_tlv *)tlv;
+ sec->cipher = mvif->cipher;
+}
+
+static int
+mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ bool bssid, bool enable)
+{
+#define UNI_MUAR_ENTRY 2
+ struct mt7996_dev *dev = phy->dev;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
+ const u8 *addr = vif->addr;
+
+ struct {
+ struct {
+ u8 band;
+ u8 __rsv[3];
+ } hdr;
+
+ __le16 tag;
+ __le16 len;
+
+ bool smesh;
+ u8 bssid;
+ u8 index;
+ u8 entry_add;
+ u8 addr[ETH_ALEN];
+ u8 __rsv[2];
+ } __packed req = {
+ .hdr.band = phy->mt76->band_idx,
+ .tag = cpu_to_le16(UNI_MUAR_ENTRY),
+ .len = cpu_to_le16(sizeof(req) - sizeof(req.hdr)),
+ .smesh = false,
+ .index = idx * 2 + bssid,
+ .entry_add = true,
+ };
+
+ if (bssid)
+ addr = vif->bss_conf.bssid;
+
+ if (enable)
+ memcpy(req.addr, addr, ETH_ALEN);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(REPT_MUAR), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u16 wlan_idx,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+ struct mt76_connac_bss_basic_tlv *bss;
+ u32 type = CONNECTION_INFRA_AP;
+ struct tlv *tlv;
+ int idx;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (enable) {
+ rcu_read_lock();
+ if (!sta)
+ sta = ieee80211_find_sta(vif,
+ vif->bss_conf.bssid);
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (sta) {
+ struct mt76_wcid *wcid;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wlan_idx = wcid->idx;
+ }
+ rcu_read_unlock();
+ }
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*bss));
+
+ bss = (struct mt76_connac_bss_basic_tlv *)tlv;
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx);
+ bss->sta_idx = cpu_to_le16(wlan_idx);
+ bss->conn_type = cpu_to_le32(type);
+ bss->omac_idx = mvif->omac_idx;
+ bss->band_idx = mvif->band_idx;
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->conn_state = !enable;
+ bss->active = enable;
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ bss->hw_bss_idx = idx;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ memcpy(bss->bssid, phy->macaddr, ETH_ALEN);
+ return 0;
+ }
+
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->phymode = mt76_connac_get_phy_mode(phy, vif,
+ chandef->chan->band, NULL);
+
+ if (chandef->chan->band == NL80211_BAND_6GHZ)
+ bss->phymode_ext |= PHY_MODE_AX_6G;
+
+ return 0;
+}
+
+static struct sk_buff *
+__mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
+{
+ struct bss_req_hdr hdr = {
+ .bss_idx = mvif->idx,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(dev, NULL, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ return skb;
+}
+
+int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
+ struct ieee80211_vif *vif, int enable)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = phy->dev;
+ struct sk_buff *skb;
+
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
+ mt7996_mcu_muar_config(phy, vif, false, enable);
+ mt7996_mcu_muar_config(phy, vif, true, enable);
+ }
+
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7996_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* bss_basic must be first */
+ mt7996_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
+ mvif->sta.wcid.idx, enable);
+ mt7996_mcu_bss_sec_tlv(skb, vif);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ goto out;
+
+ if (enable) {
+ mt7996_mcu_bss_rfch_tlv(skb, vif, phy);
+ mt7996_mcu_bss_bmc_tlv(skb, phy);
+ mt7996_mcu_bss_ra_tlv(skb, vif, phy);
+ mt7996_mcu_bss_txcmd_tlv(skb, true);
+
+ if (vif->bss_conf.he_support)
+ mt7996_mcu_bss_he_tlv(skb, vif, phy);
+
+ /* this tag is necessary regardless of whether the vif is an MLD */
+ mt7996_mcu_bss_mld_tlv(skb, vif);
+ }
+out:
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
+static int
+mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
+ struct sta_rec_ba_uni *ba;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ MT7996_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
+
+ ba = (struct sta_rec_ba_uni *)tlv;
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
+ ba->winsize = cpu_to_le16(params->buf_size);
+ ba->ssn = cpu_to_le16(params->ssn);
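+ /* ba_en is a per-TID enable bitmap; only this TID's bit is filled in here */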
+ ba->ba_en = enable << params->tid;
+ ba->amsdu = params->amsdu;
+ ba->tid = params->tid;
+
+ return mt76_mcu_skb_send_msg(dev, skb,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
+}
+
+/** starec & wtbl **/
+int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
+ struct mt7996_vif *mvif = msta->vif;
+
+ if (enable && !params->amsdu)
+ msta->wcid.amsdu = false;
+
+ return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ enable, true);
+}
+
+int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
+ struct mt7996_vif *mvif = msta->vif;
+
+ return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ enable, false);
+}
+
+static void
+mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp mcs_map;
+ struct sta_rec_he_v2 *he;
+ struct tlv *tlv;
+ int i = 0;
+
+ if (!sta->deflink.he_cap.has_he)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_V2, sizeof(*he));
+
+ he = (struct sta_rec_he_v2 *)tlv;
+ for (i = 0; i < 11; i++) {
+ if (i < 6)
+ he->he_mac_cap[i] = cpu_to_le16(elem->mac_cap_info[i]);
+ he->he_phy_cap[i] = cpu_to_le16(elem->phy_cap_info[i]);
+ }
+
+ mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (elem->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ mt7996_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW8080],
+ le16_to_cpu(mcs_map.rx_mcs_80p80));
+
+ mt7996_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW160],
+ le16_to_cpu(mcs_map.rx_mcs_160));
+ fallthrough;
+ default:
+ mt7996_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW80],
+ le16_to_cpu(mcs_map.rx_mcs_80));
+ break;
+ }
+
+ he->pkt_ext = 2;
+}
+
+static void
+mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct sta_rec_he_6g_capa *he_6g;
+ struct tlv *tlv;
+
+ if (!sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g));
+
+ he_6g = (struct sta_rec_he_6g_capa *)tlv;
+ he_6g->capa = sta->deflink.he_6ghz_capa.capa;
+}
+
+static void
+mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct sta_rec_ht *ht;
+ struct tlv *tlv;
+
+ if (!sta->deflink.ht_cap.ht_supported)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
+}
+
+static void
+mt7996_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct sta_rec_vht *vht;
+ struct tlv *tlv;
+
+	/* For the 6 GHz band, this tlv is necessary for the hw to work properly */
+ if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+}
+
+static void
+mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct sta_rec_amsdu *amsdu;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ if (!sta->deflink.agg.max_amsdu_len)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ amsdu = (struct sta_rec_amsdu *)tlv;
+ amsdu->max_amsdu_num = 8;
+ amsdu->amsdu_en = true;
+ msta->wcid.amsdu = true;
+
+ switch (sta->deflink.agg.max_amsdu_len) {
+ case IEEE80211_MAX_MPDU_LEN_VHT_11454:
+ amsdu->max_mpdu_size =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ return;
+ case IEEE80211_MAX_MPDU_LEN_HT_7935:
+ case IEEE80211_MAX_MPDU_LEN_VHT_7991:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+ return;
+ default:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+ return;
+ }
+}
+
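+/* Check whether explicit beamforming can be used with this peer: both the
+ * local vif capabilities (mvif->cap) and the peer's HE/VHT SU beamforming
+ * bits must allow the requested role (bfee means we act as beamformee).
+ */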
+static inline bool
+mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool bfee)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+ if (!bfee && tx_ant < 2)
+ return false;
+
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+
+ if (bfee)
+ return mvif->cap.he_su_ebfee &&
+ HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
+ else
+ return mvif->cap.he_su_ebfer &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
+ }
+
+ if (sta->deflink.vht_cap.vht_supported) {
+ u32 cap = sta->deflink.vht_cap.cap;
+
+ if (bfee)
+ return mvif->cap.vht_su_ebfee &&
+ (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ else
+ return mvif->cap.vht_su_ebfer &&
+ (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ }
+
+ return false;
+}
+
+static void
+mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
+{
+ bf->sounding_phy = MT_PHY_TYPE_OFDM;
+ bf->ndp_rate = 0; /* mcs0 */
+ bf->ndpa_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ bf->rept_poll_rate = MT7996_CFEND_RATE_DEFAULT; /* ofdm 24m */
+}
+
+static void
+mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
+ struct sta_rec_bf *bf)
+{
+ struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
+ u8 n = 0;
+
+ bf->tx_mode = MT_PHY_TYPE_HT;
+
+ if ((mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF) &&
+ (mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED))
+ n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK,
+ mcs->tx_params);
+ else if (mcs->rx_mask[3])
+ n = 3;
+ else if (mcs->rx_mask[2])
+ n = 2;
+ else if (mcs->rx_mask[1])
+ n = 1;
+
+ bf->nrow = hweight8(phy->mt76->antenna_mask) - 1;
+ bf->ncol = min_t(u8, bf->nrow, n);
+ bf->ibf_ncol = n;
+}
+
+static void
+mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
+ struct sta_rec_bf *bf, bool explicit)
+{
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+ struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
+ u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
+ u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
+ u8 tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+
+ bf->tx_mode = MT_PHY_TYPE_VHT;
+
+ if (explicit) {
+ u8 sts, snd_dim;
+
+ mt7996_mcu_sta_sounding_rate(bf);
+
+ sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ pc->cap);
+ snd_dim = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ vc->cap);
+ bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->nrow = 1;
+ } else {
+ bf->nrow = tx_ant;
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = nss_mcs;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->ibf_nrow = 1;
+ }
+}
+
+static void
+mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ struct mt7996_phy *phy, struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
+ struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
+ const struct ieee80211_sta_he_cap *vc =
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
+ const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
+ u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
+ u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
+ u8 snd_dim, sts;
+
+ bf->tx_mode = MT_PHY_TYPE_HE_SU;
+
+ mt7996_mcu_sta_sounding_rate(bf);
+
+ bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB,
+ pe->phy_cap_info[6]);
+ bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB,
+ pe->phy_cap_info[6]);
+ snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+ bf->nrow = min_t(u8, snd_dim, sts);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
+
+ if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
+ return;
+
+	/* handle the 160 MHz and 80+80 MHz cases */
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
+ nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
+
+ bf->ncol_gt_bw80 = nss_mcs;
+ }
+
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
+ nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
+
+ if (bf->ncol_gt_bw80)
+ bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs);
+ else
+ bf->ncol_gt_bw80 = nss_mcs;
+ }
+
+ snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+
+ bf->nrow_gt_bw80 = min_t(int, snd_dim, sts);
+}
+
+static void
+mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mvif->phy;
+ int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+ struct sta_rec_bf *bf;
+ struct tlv *tlv;
+ const u8 matrix[4][4] = {
+ {0, 0, 0, 0},
+ {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
+ {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
+ {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */
+ };
+ bool ebf;
+
+ if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ return;
+
+ ebf = mt7996_is_ebf_supported(phy, vif, sta, false);
+ if (!ebf && !dev->ibf)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ bf = (struct sta_rec_bf *)tlv;
+
+	/* he: eBF only, in accordance with spec
+	 * vht: supports both eBF and iBF
+	 * ht: iBF only, since mac80211 lacks eBF support
+	 */
+ if (sta->deflink.he_cap.has_he && ebf)
+ mt7996_mcu_sta_bfer_he(sta, vif, phy, bf);
+ else if (sta->deflink.vht_cap.vht_supported)
+ mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf);
+ else if (sta->deflink.ht_cap.ht_supported)
+ mt7996_mcu_sta_bfer_ht(sta, phy, bf);
+ else
+ return;
+
+ bf->bf_cap = ebf ? ebf : dev->ibf << 1;
+ bf->bw = sta->deflink.bandwidth;
+ bf->ibf_dbw = sta->deflink.bandwidth;
+ bf->ibf_nrow = tx_ant;
+
+ if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ bf->ibf_timeout = 0x48;
+ else
+ bf->ibf_timeout = 0x18;
+
+ if (ebf && bf->nrow != tx_ant)
+ bf->mem_20m = matrix[tx_ant][bf->ncol];
+ else
+ bf->mem_20m = matrix[bf->nrow][bf->ncol];
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ case IEEE80211_STA_RX_BW_80:
+ bf->mem_total = bf->mem_20m * 2;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bf->mem_total = bf->mem_20m;
+ break;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ break;
+ }
+}
+
+static void
+mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mvif->phy;
+ int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+ struct sta_rec_bfee *bfee;
+ struct tlv *tlv;
+ u8 nrow = 0;
+
+ if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he))
+ return;
+
+ if (!mt7996_is_ebf_supported(phy, vif, sta, true))
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ bfee = (struct sta_rec_bfee *)tlv;
+
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+
+ nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[5]);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+
+ nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ pc->cap);
+ }
+
+ /* reply with identity matrix to avoid 2x2 BF negative gain */
+ bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2);
+}
+
+static void
+mt7996_mcu_sta_phy_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct sta_rec_phy *phy;
+ struct tlv *tlv;
+ u8 af = 0, mm = 0;
+
+ if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
+
+ phy = (struct sta_rec_phy *)tlv;
+ if (sta->deflink.ht_cap.ht_supported) {
+ af = sta->deflink.ht_cap.ampdu_factor;
+ mm = sta->deflink.ht_cap.ampdu_density;
+ }
+
+ if (sta->deflink.vht_cap.vht_supported) {
+ u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->deflink.vht_cap.cap);
+
+ af = max_t(u8, af, vht_af);
+ }
+
+ if (sta->deflink.he_6ghz_capa.capa) {
+ af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ }
+
+ phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR, af) |
+ FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY, mm);
+ phy->max_ampdu_len = af;
+}
+
+static void
+mt7996_mcu_sta_hdrt_tlv(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct sta_rec_hdrt *hdrt;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HDRT, sizeof(*hdrt));
+
+ hdrt = (struct sta_rec_hdrt *)tlv;
+ hdrt->hdrt_mode = 1;
+}
+
+static void
+mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct sta_rec_hdr_trans *hdr_trans;
+ struct mt76_wcid *wcid;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HDR_TRANS, sizeof(*hdr_trans));
+ hdr_trans = (struct sta_rec_hdr_trans *)tlv;
+ hdr_trans->dis_rx_hdr_tran = true;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ hdr_trans->to_ds = true;
+ else
+ hdr_trans->from_ds = true;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (!wcid)
+ return;
+
+ hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+ if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
+ hdr_trans->to_ds = true;
+ hdr_trans->from_ds = true;
+ }
+}
+
+static enum mcu_mmps_mode
+mt7996_mcu_get_mmps_mode(enum ieee80211_smps_mode smps)
+{
+ switch (smps) {
+ case IEEE80211_SMPS_OFF:
+ return MCU_MMPS_DISABLE;
+ case IEEE80211_SMPS_STATIC:
+ return MCU_MMPS_STATIC;
+ case IEEE80211_SMPS_DYNAMIC:
+ return MCU_MMPS_DYNAMIC;
+ default:
+ return MCU_MMPS_DISABLE;
+ }
+}
+
+int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
+ void *data, u16 version)
+{
+ struct ra_fixed_rate *req;
+	struct uni_header hdr = {};
+ struct sk_buff *skb;
+ struct tlv *tlv;
+ int len;
+
+ len = sizeof(hdr) + sizeof(*req);
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_RA_FIXED_RATE, sizeof(*req));
+ req = (struct ra_fixed_rate *)tlv;
+ req->version = cpu_to_le16(version);
+ memcpy(&req->rate, data, sizeof(req->rate));
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WM_UNI_CMD(RA), true);
+}
+
+static void
+mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+ struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ enum nl80211_band band = chandef->chan->band;
+ struct sta_rec_ra *ra;
+ struct tlv *tlv;
+ u32 supp_rate = sta->deflink.supp_rates[band];
+ u32 cap = sta->wme ? STA_CAP_WMM : 0;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+ ra = (struct sta_rec_ra *)tlv;
+
+ ra->valid = true;
+ ra->auto_rate = true;
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
+ ra->channel = chandef->chan->hw_value;
+ ra->bw = sta->deflink.bandwidth;
+ ra->phy.bw = sta->deflink.bandwidth;
+ ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
+
+ if (supp_rate) {
+ supp_rate &= mask->control[band].legacy;
+ ra->rate_len = hweight32(supp_rate);
+
+ if (band == NL80211_BAND_2GHZ) {
+ ra->supp_mode = MODE_CCK;
+ ra->supp_cck_rate = supp_rate & GENMASK(3, 0);
+
+ if (ra->rate_len > 4) {
+ ra->supp_mode |= MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate >> 4;
+ }
+ } else {
+ ra->supp_mode = MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate;
+ }
+ }
+
+ if (sta->deflink.ht_cap.ht_supported) {
+ ra->supp_mode |= MODE_HT;
+ ra->af = sta->deflink.ht_cap.ampdu_factor;
+ ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+
+ cap |= STA_CAP_HT;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ cap |= STA_CAP_SGI_20;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ cap |= STA_CAP_SGI_40;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ cap |= STA_CAP_TX_STBC;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ cap |= STA_CAP_RX_STBC;
+ if (mvif->cap.ht_ldpc &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ cap |= STA_CAP_LDPC;
+
+ mt7996_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
+ mask->control[band].ht_mcs);
+ ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
+ }
+
+ if (sta->deflink.vht_cap.vht_supported) {
+ u8 af;
+
+ ra->supp_mode |= MODE_VHT;
+ af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->deflink.vht_cap.cap);
+ ra->af = max_t(u8, ra->af, af);
+
+ cap |= STA_CAP_VHT;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ cap |= STA_CAP_VHT_SGI_80;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ cap |= STA_CAP_VHT_SGI_160;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ cap |= STA_CAP_VHT_TX_STBC;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ cap |= STA_CAP_VHT_RX_STBC;
+ if (mvif->cap.vht_ldpc &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ cap |= STA_CAP_VHT_LDPC;
+
+ mt7996_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
+ mask->control[band].vht_mcs);
+ }
+
+ if (sta->deflink.he_cap.has_he) {
+ ra->supp_mode |= MODE_HE;
+ cap |= STA_CAP_HE;
+
+ if (sta->deflink.he_6ghz_capa.capa)
+ ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ }
+ ra->sta_cap = cpu_to_le32(cap);
+}
+
+int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool changed)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct sk_buff *skb;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid,
+ MT7996_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+	/* The firmware RC algorithm refers to sta_rec_he for HE control.
+	 * Once dev->rc_work changes the settings, the driver should also
+	 * update sta_rec_he here.
+	 */
+ if (changed)
+ mt7996_mcu_sta_he_tlv(skb, sta);
+
+	/* sta_rec_ra accommodates BW, NSS and only the MCS range format,
+	 * i.e. 0-{7,8,9} for VHT.
+	 */
+ mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
+}
+
+static int
+mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+#define MT_STA_BSS_GROUP 1
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta;
+ struct {
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+ __le16 wlan_idx;
+ u8 __rsv2[2];
+ __le32 action;
+ __le32 val;
+ u8 __rsv3[8];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_VOW_DRR_CTRL),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .action = cpu_to_le32(MT_STA_BSS_GROUP),
+ .val = cpu_to_le32(mvif->mt76.idx % 16),
+ };
+
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+ req.wlan_idx = cpu_to_le16(msta->wcid.idx);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
+ sizeof(req), true);
+}
+
+int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta;
+ struct sk_buff *skb;
+ int ret;
+
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid,
+ MT7996_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* starec basic */
+ mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable,
+ !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
+ if (!enable)
+ goto out;
+
+ /* tag order is in accordance with firmware dependency. */
+ if (sta) {
+ /* starec phy */
+ mt7996_mcu_sta_phy_tlv(dev, skb, vif, sta);
+ /* starec hdrt mode */
+ mt7996_mcu_sta_hdrt_tlv(dev, skb);
+ /* starec bfer */
+ mt7996_mcu_sta_bfer_tlv(dev, skb, vif, sta);
+ /* starec ht */
+ mt7996_mcu_sta_ht_tlv(skb, sta);
+ /* starec vht */
+ mt7996_mcu_sta_vht_tlv(skb, sta);
+ /* starec uapsd */
+ mt76_connac_mcu_sta_uapsd(skb, vif, sta);
+ /* starec amsdu */
+ mt7996_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
+ /* starec he */
+ mt7996_mcu_sta_he_tlv(skb, sta);
+		/* starec he 6g */
+ mt7996_mcu_sta_he_6g_tlv(skb, sta);
+ /* TODO: starec muru */
+ /* starec bfee */
+ mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta);
+ /* starec hdr trans */
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ }
+
+ ret = mt7996_mcu_add_group(dev, vif, sta);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+out:
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
+}
+
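+/* Build the STA_REC_KEY_V2 tlv. A BIP (IGTK) key is installed together with
+ * the previously cached pairwise CCMP key, which is why CCMP keys are stored
+ * in sta_key_conf for the later batch update.
+ */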
+static int
+mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct sta_rec_sec_uni *sec;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sec = (struct sta_rec_sec_uni *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key_uni *sec_key;
+ u8 cipher;
+
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+
+ if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+ sec_key->wlan_idx = cpu_to_le16(wcid->idx);
+ sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
+ sec_key->key_id = sta_key_conf->keyidx;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, sta_key_conf->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->wlan_idx = cpu_to_le16(wcid->idx);
+ sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+ sec->n_cipher = 2;
+ } else {
+ sec_key->wlan_idx = cpu_to_le16(wcid->idx);
+ sec_key->cipher_id = cipher;
+ sec_key->key_id = key->keyidx;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ /* store key_conf for BIP batch update */
+ if (cipher == MCU_CIPHER_AES_CCMP) {
+ memcpy(sta_key_conf->key, key->key, key->keylen);
+ sta_key_conf->keyidx = key->keyidx;
+ }
+
+ sec->n_cipher = 1;
+ }
+ } else {
+ sec->n_cipher = 0;
+ }
+
+ return 0;
+}
+
+int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ MT7996_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+	ret = mt7996_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd);
+	if (ret) {
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
+}
+
+int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ u8 __rsv[2];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 __rsv;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->mt76.omac_idx,
+ .band_idx = mvif->mt76.band_idx,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ },
+ };
+
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
+ return mt7996_mcu_muar_config(phy, vif, false, enable);
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WMWA_UNI_CMD(DEV_INFO_UPDATE),
+ &data, sizeof(data), true);
+}
+
+static void
+mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
+ struct sk_buff *skb,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct bss_bcn_cntdwn_tlv *info;
+ struct tlv *tlv;
+ u16 tag;
+
+ if (!offs->cntdwn_counter_offs[0])
+ return;
+
+ tag = vif->bss_conf.csa_active ? UNI_BSS_INFO_BCN_CSA : UNI_BSS_INFO_BCN_BCC;
+
+ tlv = mt7996_mcu_add_uni_tlv(rskb, tag, sizeof(*info));
+
+ info = (struct bss_bcn_cntdwn_tlv *)tlv;
+ info->cnt = skb->data[offs->cntdwn_counter_offs[0]];
+}
+
+static void
+mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct sk_buff *rskb, struct sk_buff *skb,
+ struct bss_bcn_content_tlv *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ u8 *buf;
+
+ bcn->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ bcn->tim_ie_pos = cpu_to_le16(offs->tim_offset);
+
+ if (offs->cntdwn_counter_offs[0]) {
+ u16 offset = offs->cntdwn_counter_offs[0];
+
+ if (vif->bss_conf.csa_active)
+ bcn->csa_ie_pos = cpu_to_le16(offset - 4);
+ if (vif->bss_conf.color_change_active)
+ bcn->bcc_ie_pos = cpu_to_le16(offset - 3);
+ }
+
+ buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE;
+ mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
+ BSS_CHANGED_BEACON);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+}
+
+static void
+mt7996_mcu_beacon_check_caps(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_vif_cap *vc = &mvif->cap;
+ const struct ieee80211_he_cap_elem *he;
+ const struct ieee80211_vht_cap *vht;
+ const struct ieee80211_ht_cap *ht;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ const u8 *ie;
+ u32 len, bc;
+
+ /* Check missing configuration options to allow AP mode in mac80211
+ * to remain in sync with hostapd settings, and get a subset of
+ * beacon and hardware capabilities.
+ */
+ if (WARN_ON_ONCE(skb->len <= (mgmt->u.beacon.variable - skb->data)))
+ return;
+
+ memset(vc, 0, sizeof(*vc));
+
+ len = skb->len - (mgmt->u.beacon.variable - skb->data);
+
+ ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, mgmt->u.beacon.variable,
+ len);
+ if (ie && ie[1] >= sizeof(*ht)) {
+ ht = (void *)(ie + 2);
+ vc->ht_ldpc |= !!(le16_to_cpu(ht->cap_info) &
+ IEEE80211_HT_CAP_LDPC_CODING);
+ }
+
+ ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.beacon.variable,
+ len);
+ if (ie && ie[1] >= sizeof(*vht)) {
+ u32 pc = phy->mt76->sband_5g.sband.vht_cap.cap;
+
+ vht = (void *)(ie + 2);
+ bc = le32_to_cpu(vht->vht_cap_info);
+
+ vc->vht_ldpc |= !!(bc & IEEE80211_VHT_CAP_RXLDPC);
+ vc->vht_su_ebfer =
+ (bc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ vc->vht_su_ebfee =
+ (bc & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ vc->vht_mu_ebfer =
+ (bc & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+ vc->vht_mu_ebfee =
+ (bc & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ }
+
+ ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY,
+ mgmt->u.beacon.variable, len);
+ if (ie && ie[1] >= sizeof(*he) + 1) {
+ const struct ieee80211_sta_he_cap *pc =
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
+ const struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
+
+ he = (void *)(ie + 3);
+
+ vc->he_ldpc =
+ HE_PHY(CAP1_LDPC_CODING_IN_PAYLOAD, pe->phy_cap_info[1]);
+ vc->he_su_ebfer =
+ HE_PHY(CAP3_SU_BEAMFORMER, he->phy_cap_info[3]) &&
+ HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
+ vc->he_su_ebfee =
+ HE_PHY(CAP4_SU_BEAMFORMEE, he->phy_cap_info[4]) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
+ vc->he_mu_ebfer =
+ HE_PHY(CAP4_MU_BEAMFORMER, he->phy_cap_info[4]) &&
+ HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4]);
+ }
+}
+
+int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int en)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb, *rskb;
+ struct tlv *tlv;
+ struct bss_bcn_content_tlv *bcn;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7996_BEACON_UPDATE_SIZE);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+ tlv = mt7996_mcu_add_uni_tlv(rskb,
+ UNI_BSS_INFO_BCN_CONTENT, sizeof(*bcn));
+ bcn = (struct bss_bcn_content_tlv *)tlv;
+ bcn->enable = en;
+
+ if (!en)
+ goto out;
+
+	skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+	if (!skb) {
+		dev_kfree_skb(rskb);
+		return -EINVAL;
+	}
+
+	if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
+		dev_err(dev->mt76.dev, "Bcn size limit exceeded\n");
+		dev_kfree_skb(rskb);
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+
+ mt7996_mcu_beacon_check_caps(phy, vif, skb);
+
+ mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ /* TODO: subtag - 11v MBSSID */
+ mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs);
+ dev_kfree_skb(skb);
+out:
+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
+int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif, u32 changed)
+{
+#define OFFLOAD_TX_MODE_SU BIT(0)
+#define OFFLOAD_TX_MODE_MU BIT(1)
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct bss_inband_discovery_tlv *discov;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *rskb, *skb = NULL;
+ struct tlv *tlv;
+ u8 *buf, interval;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7996_INBAND_FRAME_SIZE);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY &&
+ vif->bss_conf.fils_discovery.max_interval) {
+ interval = vif->bss_conf.fils_discovery.max_interval;
+ skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
+ } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
+ vif->bss_conf.unsol_bcast_probe_resp_interval) {
+ interval = vif->bss_conf.unsol_bcast_probe_resp_interval;
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+	if (!skb) {
+		dev_kfree_skb(rskb);
+		return -EINVAL;
+	}
+
+	if (skb->len > MAX_INBAND_FRAME_SIZE - MT_TXD_SIZE) {
+		dev_err(dev->mt76.dev, "inband discovery size limit exceeded\n");
+		dev_kfree_skb(rskb);
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->band = band;
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+
+ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, sizeof(*discov));
+
+ discov = (struct bss_inband_discovery_tlv *)tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+ /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
+ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
+ discov->tx_interval = interval;
+ discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ discov->enable = true;
+ discov->wcid = cpu_to_le16(MT7996_WTBL_RESERVED);
+
+ buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE;
+
+ mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
+ changed);
+
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, rskb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
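+/* Request driver ownership of the band's low-power control register and
+ * wait for the firmware to clear its FW_OWN state.
+ */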
+static int mt7996_driver_own(struct mt7996_dev *dev, u8 band)
+{
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band),
+ MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+	/* clear irq when driver own succeeds */
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band),
+ MT_TOP_LPCR_HOST_BAND_STAT);
+
+ return 0;
+}
+
+static u32 mt7996_patch_sec_mode(u32 key_info)
+{
+ u32 sec = u32_get_bits(key_info, MT7996_PATCH_SEC), key = 0;
+
+ if (key_info == GENMASK(31, 0) || sec == MT7996_SEC_MODE_PLAIN)
+ return 0;
+
+ if (sec == MT7996_SEC_MODE_AES)
+ key = u32_get_bits(key_info, MT7996_PATCH_AES_KEY);
+ else
+ key = u32_get_bits(key_info, MT7996_PATCH_SCRAMBLE_KEY);
+
+ return MT7996_SEC_ENCRYPT | MT7996_SEC_IV |
+ u32_encode_bits(key, MT7996_SEC_KEY_IDX);
+}
+
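+/* Download the ROM patch: take the patch semaphore, push every section
+ * described in the patch header through FW_SCATTER, start the patch and
+ * release the semaphore again.
+ */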
+static int mt7996_load_patch(struct mt7996_dev *dev)
+{
+ const struct mt7996_patch_hdr *hdr;
+ const struct firmware *fw = NULL;
+ int i, ret, sem;
+
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 1);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, MT7996_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7996_patch_hdr *)(fw->data);
+
+ dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+ struct mt7996_patch_sec *sec;
+ const u8 *dl;
+ u32 len, addr, sec_key_idx, mode = DL_MODE_NEED_RSP;
+
+ sec = (struct mt7996_patch_sec *)(fw->data + sizeof(*hdr) +
+ i * sizeof(*sec));
+ if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
+ PATCH_SEC_TYPE_INFO) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ addr = be32_to_cpu(sec->info.addr);
+ len = be32_to_cpu(sec->info.len);
+ sec_key_idx = be32_to_cpu(sec->info.sec_key_idx);
+ dl = fw->data + be32_to_cpu(sec->offs);
+
+ mode |= mt7996_patch_sec_mode(sec_key_idx);
+
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
+ dl, len, 4096);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send patch\n");
+ goto out;
+ }
+ }
+
+ ret = mt76_connac_mcu_start_patch(&dev->mt76);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 0);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+ break;
+ }
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int
+mt7996_mcu_send_ram_firmware(struct mt7996_dev *dev,
+ const struct mt7996_fw_trailer *hdr,
+ const u8 *data, bool is_wa)
+{
+ int i, offset = 0;
+ u32 override = 0, option = 0;
+
+ for (i = 0; i < hdr->n_region; i++) {
+ const struct mt7996_fw_region *region;
+ int err;
+ u32 len, addr, mode;
+
+ region = (const struct mt7996_fw_region *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ region->feature_set, is_wa);
+ len = le32_to_cpu(region->len);
+ addr = le32_to_cpu(region->addr);
+
+ if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ override = addr;
+
+ err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
+ if (err) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ return err;
+ }
+
+ err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
+ data + offset, len, 4096);
+ if (err) {
+ dev_err(dev->mt76.dev, "Failed to send firmware.\n");
+ return err;
+ }
+
+ offset += len;
+ }
+
+ if (override)
+ option |= FW_START_OVERRIDE;
+
+ if (is_wa)
+ option |= FW_START_WORKING_PDA_CR4;
+
+ return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
+}
+
+static int mt7996_load_ram(struct mt7996_dev *dev)
+{
+ const struct mt7996_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, MT7996_FIRMWARE_WM, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7996_fw_trailer *)(fw->data + fw->size - sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7996_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
+ goto out;
+ }
+
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, MT7996_FIRMWARE_WA, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7996_fw_trailer *)(fw->data + fw->size - sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7996_mcu_send_ram_firmware(dev, hdr, fw->data, true);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start WA firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int
+mt7996_firmware_state(struct mt7996_dev *dev, bool wa)
+{
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
+ state, 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int mt7996_load_firmware(struct mt7996_dev *dev)
+{
+ int ret;
+
+	/* make sure the firmware is in download state */
+ if (mt7996_firmware_state(dev, false)) {
+ /* restart firmware once */
+ __mt76_mcu_restart(&dev->mt76);
+ ret = mt7996_firmware_state(dev, false);
+ if (ret) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return ret;
+ }
+ }
+
+ ret = mt7996_load_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7996_load_ram(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7996_firmware_state(dev, true);
+ if (ret)
+ return ret;
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
+
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+
+int mt7996_mcu_fw_log_2_host(struct mt7996_dev *dev, u8 type, u8 ctrl)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 ctrl;
+ u8 interval;
+ u8 _rsv2[2];
+ } __packed data = {
+ .tag = cpu_to_le16(UNI_WSYS_CONFIG_FW_LOG_CTRL),
+ .len = cpu_to_le16(sizeof(data) - 4),
+ .ctrl = ctrl,
+ };
+
+ if (type == MCU_FW_LOG_WA)
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_UNI_CMD(WSYS_CONFIG),
+ &data, sizeof(data), true);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(WSYS_CONFIG), &data,
+ sizeof(data), true);
+}
+
+int mt7996_mcu_fw_dbg_ctrl(struct mt7996_dev *dev, u32 module, u8 level)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ __le32 module_idx;
+ u8 level;
+ u8 _rsv2[3];
+ } data = {
+ .tag = cpu_to_le16(UNI_WSYS_CONFIG_FW_DBG_CTRL),
+ .len = cpu_to_le16(sizeof(data) - 4),
+ .module_idx = cpu_to_le32(module),
+ .level = level,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(WSYS_CONFIG), &data,
+ sizeof(data), false);
+}
+
+static int mt7996_mcu_set_mwds(struct mt7996_dev *dev, bool enabled)
+{
+ struct {
+ u8 enable;
+ u8 _rsv[3];
+ } __packed req = {
+ .enable = enabled
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(MWDS_SUPPORT), &req,
+ sizeof(req), false);
+}
+
+static void mt7996_add_rx_airtime_tlv(struct sk_buff *skb, u8 band_idx)
+{
+ struct vow_rx_airtime *req;
+ struct tlv *tlv;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_VOW_RX_AT_AIRTIME_CLR_EN, sizeof(*req));
+ req = (struct vow_rx_airtime *)tlv;
+ req->enable = true;
+ req->band = band_idx;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_VOW_RX_AT_AIRTIME_EN, sizeof(*req));
+ req = (struct vow_rx_airtime *)tlv;
+ req->enable = true;
+ req->band = band_idx;
+}
+
+static int
+mt7996_mcu_init_rx_airtime(struct mt7996_dev *dev)
+{
+ struct uni_header hdr = {};
+ struct sk_buff *skb;
+ int len, num;
+
+ num = 2 + 2 * (dev->dbdc_support + dev->tbtc_support);
+ len = sizeof(hdr) + num * sizeof(struct vow_rx_airtime);
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ mt7996_add_rx_airtime_tlv(skb, dev->mt76.phy.band_idx);
+
+ if (dev->dbdc_support)
+ mt7996_add_rx_airtime_tlv(skb, MT_BAND1);
+
+ if (dev->tbtc_support)
+ mt7996_add_rx_airtime_tlv(skb, MT_BAND2);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WM_UNI_CMD(VOW), true);
+}
+
+static int
+mt7996_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 power_mode;
+ u8 __rsv2[3];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_POWER_OFF),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_WM_UNI_CMD(POWER_CREL), &req,
+ sizeof(req), false);
+}
+
+int mt7996_mcu_init(struct mt7996_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7996_mcu_ops = {
+ .headroom = sizeof(struct mt76_connac2_mcu_txd), /* reuse */
+ .mcu_skb_send_msg = mt7996_mcu_send_message,
+ .mcu_parse_response = mt7996_mcu_parse_response,
+ .mcu_restart = mt7996_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt7996_mcu_ops;
+
+	/* force firmware operation mode into normal state,
+	 * which should be set before the firmware download stage.
+	 */
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7996_driver_own(dev, 0);
+ if (ret)
+ return ret;
+	/* set driver own for band1 when two hifs exist */
+ if (dev->hif2) {
+ ret = mt7996_driver_own(dev, 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7996_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, 0);
+ if (ret)
+ return ret;
+
+ ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
+ if (ret)
+ return ret;
+
+ ret = mt7996_mcu_set_mwds(dev, 1);
+ if (ret)
+ return ret;
+
+ ret = mt7996_mcu_init_rx_airtime(dev);
+ if (ret)
+ return ret;
+
+ return mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_RED, 0, 0);
+}
+
+void mt7996_mcu_exit(struct mt7996_dev *dev)
+{
+ __mt76_mcu_restart(&dev->mt76);
+ if (mt7996_firmware_state(dev, false)) {
+ dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+ return;
+ }
+
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ if (dev->hif2)
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ MT_TOP_LPCR_HOST_FW_OWN);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+}
+
+int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans)
+{
+ struct {
+ u8 __rsv[4];
+	} __packed hdr = {};
+ struct hdr_trans_blacklist *req_blacklist;
+ struct hdr_trans_en *req_en;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+ int len = MT7996_HDR_TRANS_MAX_SIZE + sizeof(hdr);
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_HDR_TRANS_EN, sizeof(*req_en));
+ req_en = (struct hdr_trans_en *)tlv;
+ req_en->enable = hdr_trans;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_HDR_TRANS_VLAN,
+ sizeof(struct hdr_trans_vlan));
+
+ if (hdr_trans) {
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_HDR_TRANS_BLACKLIST,
+ sizeof(*req_blacklist));
+ req_blacklist = (struct hdr_trans_blacklist *)tlv;
+ req_blacklist->enable = 1;
+ req_blacklist->type = cpu_to_le16(ETH_P_PAE);
+ }
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WM_UNI_CMD(RX_HDR_TRANS), true);
+}
+
+int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
+{
+#define MCU_EDCA_AC_PARAM 0
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
+ WMM_CW_MAX_SET | WMM_TXOP_SET)
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct {
+ u8 bss_idx;
+ u8 __rsv[3];
+ } __packed hdr = {
+ .bss_idx = mvif->mt76.idx,
+ };
+ struct sk_buff *skb;
+ int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca);
+ int ac;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct edca *e;
+ struct tlv *tlv;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, MCU_EDCA_AC_PARAM, sizeof(*e));
+
+ e = (struct edca *)tlv;
+ e->set = WMM_PARAM_SET;
+ e->queue = ac + mvif->mt76.wmm_idx * MT7996_MAX_WMM_SETS;
+ e->aifs = q->aifs;
+ e->txop = cpu_to_le16(q->txop);
+
+ if (q->cw_min)
+ e->cw_min = fls(q->cw_min);
+ else
+ e->cw_min = 5;
+
+ if (q->cw_max)
+ e->cw_max = fls(q->cw_max);
+ else
+ e->cw_max = 10;
+ }
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WM_UNI_CMD(EDCA_UPDATE), true);
+}
+
+int mt7996_mcu_set_fcc5_lpn(struct mt7996_dev *dev, int val)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 ctrl;
+ __le16 min_lpn;
+ u8 rsv[2];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_RDD_CTRL_SET_TH),
+ .len = cpu_to_le16(sizeof(req) - 4),
+
+ .ctrl = cpu_to_le32(0x1),
+ .min_lpn = cpu_to_le16(val),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RDD_CTRL),
+ &req, sizeof(req), true);
+}
+
+int mt7996_mcu_set_pulse_th(struct mt7996_dev *dev,
+ const struct mt7996_dfs_pulse *pulse)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 ctrl;
+
+ __le32 max_width; /* us */
+ __le32 max_pwr; /* dbm */
+ __le32 min_pwr; /* dbm */
+ __le32 min_stgr_pri; /* us */
+ __le32 max_stgr_pri; /* us */
+ __le32 min_cr_pri; /* us */
+ __le32 max_cr_pri; /* us */
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_RDD_CTRL_SET_TH),
+ .len = cpu_to_le16(sizeof(req) - 4),
+
+ .ctrl = cpu_to_le32(0x3),
+
+#define __req_field(field) .field = cpu_to_le32(pulse->field)
+ __req_field(max_width),
+ __req_field(max_pwr),
+ __req_field(min_pwr),
+ __req_field(min_stgr_pri),
+ __req_field(max_stgr_pri),
+ __req_field(min_cr_pri),
+ __req_field(max_cr_pri),
+#undef __req_field
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RDD_CTRL),
+ &req, sizeof(req), true);
+}
+
+int mt7996_mcu_set_radar_th(struct mt7996_dev *dev, int index,
+ const struct mt7996_dfs_pattern *pattern)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 ctrl;
+ __le16 radar_type;
+
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ __le32 min_pri;
+ __le32 max_pri;
+ u8 max_pw;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ u8 rsv[2];
+ __le32 min_stgpr_diff;
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_RDD_CTRL_SET_TH),
+ .len = cpu_to_le16(sizeof(req) - 4),
+
+ .ctrl = cpu_to_le32(0x2),
+ .radar_type = cpu_to_le16(index),
+
+#define __req_field_u8(field) .field = pattern->field
+#define __req_field_u32(field) .field = cpu_to_le32(pattern->field)
+ __req_field_u8(enb),
+ __req_field_u8(stgr),
+ __req_field_u8(min_crpn),
+ __req_field_u8(max_crpn),
+ __req_field_u8(min_crpr),
+ __req_field_u8(min_pw),
+ __req_field_u32(min_pri),
+ __req_field_u32(max_pri),
+ __req_field_u8(max_pw),
+ __req_field_u8(min_crbn),
+ __req_field_u8(max_crbn),
+ __req_field_u8(min_stgpn),
+ __req_field_u8(max_stgpn),
+ __req_field_u8(min_stgpr),
+ __req_field_u32(min_stgpr_diff),
+#undef __req_field_u8
+#undef __req_field_u32
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RDD_CTRL),
+ &req, sizeof(req), true);
+}
+
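+/* Configure the background (offchannel) receive chain used for background
+ * radar detection: START/RUNNING select the channel to monitor, STOP
+ * restores the operating channel settings.
+ */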
+static int
+mt7996_mcu_background_chain_ctrl(struct mt7996_phy *phy,
+ struct cfg80211_chan_def *chandef,
+ int cmd)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_channel *chan = mphy->chandef.chan;
+ int freq = mphy->chandef.center_freq1;
+ struct mt7996_mcu_background_chain_ctrl req = {
+ .tag = cpu_to_le16(0),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .monitor_scan_type = 2, /* simple rx */
+ };
+
+ if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP)
+ return -EINVAL;
+
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ return -EINVAL;
+
+ switch (cmd) {
+ case CH_SWITCH_BACKGROUND_SCAN_START: {
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.monitor_bw = mt76_connac_chan_bw(chandef);
+ req.band_idx = phy->mt76->band_idx;
+ req.scan_mode = 1;
+ break;
+ }
+ case CH_SWITCH_BACKGROUND_SCAN_RUNNING:
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.band_idx = phy->mt76->band_idx;
+ req.scan_mode = 2;
+ break;
+ case CH_SWITCH_BACKGROUND_SCAN_STOP:
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.tx_stream = hweight8(mphy->antenna_mask);
+ req.rx_stream = mphy->antenna_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req.band = chandef ? chandef->chan->band == NL80211_BAND_5GHZ : 1;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(OFFCH_SCAN_CTRL),
+ &req, sizeof(req), false);
+}
+
+int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7996_dev *dev = phy->dev;
+ int err, region;
+
+ if (!chandef) { /* disable offchain */
+ err = mt7996_mcu_rdd_cmd(dev, RDD_STOP, MT_RX_SEL2,
+ 0, 0);
+ if (err)
+ return err;
+
+ return mt7996_mcu_background_chain_ctrl(phy, NULL,
+ CH_SWITCH_BACKGROUND_SCAN_STOP);
+ }
+
+ err = mt7996_mcu_background_chain_ctrl(phy, chandef,
+ CH_SWITCH_BACKGROUND_SCAN_START);
+ if (err)
+ return err;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ return mt7996_mcu_rdd_cmd(dev, RDD_START, MT_RX_SEL2,
+ 0, region);
+}
+
+int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
+{
+ static const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 0,
+ [NL80211_BAND_5GHZ] = 1,
+ [NL80211_BAND_6GHZ] = 2,
+ };
+ struct mt7996_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1;
+ u8 band_idx = phy->mt76->band_idx;
+ struct {
+ /* fixed field */
+ u8 __rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 control_ch;
+ u8 center_ch;
+ u8 bw;
+ u8 tx_path_num;
+ u8 rx_path; /* mask or num */
+ u8 switch_reason;
+ u8 band_idx;
+ u8 center_ch2; /* for 80+80 only */
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 ap_bw;
+ u8 ap_center_ch;
+ u8 rsv1[53];
+ } __packed req = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .control_ch = chandef->chan->hw_value,
+ .center_ch = ieee80211_frequency_to_channel(freq1),
+ .bw = mt76_connac_chan_bw(chandef),
+ .tx_path_num = hweight16(phy->mt76->chainmask),
+ .rx_path = phy->mt76->chainmask >> dev->chainshift[band_idx],
+ .band_idx = band_idx,
+ .channel_band = ch_band[chandef->chan->band],
+ };
+
+ if (tag == UNI_CHANNEL_RX_PATH ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
+ req.switch_reason = CH_SWITCH_DFS;
+ else
+ req.switch_reason = CH_SWITCH_NORMAL;
+
+ if (tag == UNI_CHANNEL_SWITCH)
+ req.rx_path = hweight8(req.rx_path);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ req.center_ch2 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WMWA_UNI_CMD(CHANNEL_SWITCH),
+ &req, sizeof(req), true);
+}
+
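+/* Push the EEPROM image kept in flash/bin-file mode to the firmware in
+ * PER_PAGE_SIZE chunks using buffer-mode EFUSE_CTRL messages.
+ */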
+static int mt7996_mcu_set_eeprom_flash(struct mt7996_dev *dev)
+{
+#define MAX_PAGE_IDX_MASK GENMASK(7, 5)
+#define PAGE_IDX_MASK GENMASK(4, 2)
+#define PER_PAGE_SIZE 0x400
+ struct mt7996_mcu_eeprom req = {
+ .tag = cpu_to_le16(UNI_EFUSE_BUFFER_MODE),
+ .buffer_mode = EE_MODE_BUFFER
+ };
+ u16 eeprom_size = MT7996_EEPROM_SIZE;
+ u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE);
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ int eep_len, i;
+
+ for (i = 0; i < total; i++, eep += eep_len) {
+ struct sk_buff *skb;
+ int ret, msg_len;
+
+ if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE))
+ eep_len = eeprom_size % PER_PAGE_SIZE;
+ else
+ eep_len = PER_PAGE_SIZE;
+
+ msg_len = sizeof(req) + eep_len;
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, msg_len);
+ if (!skb)
+ return -ENOMEM;
+
+ req.len = cpu_to_le16(msg_len - 4);
+ req.format = FIELD_PREP(MAX_PAGE_IDX_MASK, total - 1) |
+ FIELD_PREP(PAGE_IDX_MASK, i) | EE_FORMAT_WHOLE;
+ req.buf_len = cpu_to_le16(eep_len);
+
+ skb_put_data(skb, &req, sizeof(req));
+ skb_put_data(skb, eep, eep_len);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WM_UNI_CMD(EFUSE_CTRL), true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int mt7996_mcu_set_eeprom(struct mt7996_dev *dev)
+{
+ struct mt7996_mcu_eeprom req = {
+ .tag = cpu_to_le16(UNI_EFUSE_BUFFER_MODE),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .buffer_mode = EE_MODE_EFUSE,
+ .format = EE_FORMAT_WHOLE
+ };
+
+ if (dev->flash_mode)
+ return mt7996_mcu_set_eeprom_flash(dev);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(EFUSE_CTRL),
+ &req, sizeof(req), true);
+}
+
+int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ __le32 addr;
+ __le32 valid;
+ u8 data[16];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_EFUSE_ACCESS),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .addr = cpu_to_le32(round_down(offset,
+ MT7996_EEPROM_BLOCK_SIZE)),
+ };
+ struct sk_buff *skb;
+ bool valid;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), &req,
+ sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ valid = le32_to_cpu(*(__le32 *)(skb->data + 16));
+ if (valid) {
+ u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
+ u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
+
+ skb_pull(skb, 64);
+ memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 num;
+ u8 version;
+ u8 die_idx;
+ u8 _rsv2;
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_EFUSE_FREE_BLOCK),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .version = 2,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), &req,
+ sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ *block_num = *(u8 *)(skb->data + 8);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+{
+ struct {
+ struct {
+ u8 band;
+ u8 __rsv[3];
+ } hdr;
+ struct {
+ __le16 tag;
+ __le16 len;
+ __le32 offs;
+ } data[4];
+ } __packed req = {
+ .hdr.band = phy->mt76->band_idx,
+ };
+ /* strict order */
+ static const u32 offs[] = {
+ UNI_MIB_TX_TIME,
+ UNI_MIB_RX_TIME,
+ UNI_MIB_OBSS_AIRTIME,
+ UNI_MIB_NON_WIFI_TIME,
+ };
+ struct mt76_channel_state *state = phy->mt76->chan_state;
+ struct mt76_channel_state *state_ts = &phy->state_ts;
+ struct mt7996_dev *dev = phy->dev;
+ struct mt7996_mcu_mib *res;
+ struct sk_buff *skb;
+ int i, ret;
+
+ for (i = 0; i < 4; i++) {
+ req.data[i].tag = cpu_to_le16(UNI_CMD_MIB_DATA);
+ req.data[i].len = cpu_to_le16(sizeof(req.data[i]));
+ req.data[i].offs = cpu_to_le32(offs[i]);
+ }
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(GET_MIB_INFO),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ skb_pull(skb, sizeof(req.hdr));
+
+ res = (struct mt7996_mcu_mib *)(skb->data);
+
+ if (chan_switch)
+ goto out;
+
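+ /* accumulate the airtime deltas since the previous query into the channel state */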
+#define __res_u64(s) le64_to_cpu(res[s].data)
+ state->cc_tx += __res_u64(1) - state_ts->cc_tx;
+ state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
+ state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+ state->cc_busy += __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3) -
+ state_ts->cc_busy;
+
+out:
+ state_ts->cc_tx = __res_u64(1);
+ state_ts->cc_bss_rx = __res_u64(2);
+ state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+ state_ts->cc_busy = __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3);
+#undef __res_u64
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 val, u8 band)
+{
+ struct {
+ u8 rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ union {
+ struct {
+ __le32 mask;
+ } __packed set;
+
+ struct {
+ u8 method;
+ u8 band;
+ u8 rsv2[2];
+ } __packed trigger;
+ };
+ } __packed req = {
+ .tag = cpu_to_le16(action),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+
+ switch (action) {
+ case UNI_CMD_SER_SET:
+ req.set.mask = cpu_to_le32(val);
+ break;
+ case UNI_CMD_SER_TRIGGER:
+ req.trigger.method = val;
+ req.trigger.band = band;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(SER),
+ &req, sizeof(req), false);
+}
+
+int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
+{
+#define MT7996_BF_MAX_SIZE sizeof(union bf_tag_tlv)
+#define BF_PROCESSING 4
+ struct uni_header hdr;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+ int len = sizeof(hdr) + MT7996_BF_MAX_SIZE;
+
+ memset(&hdr, 0, sizeof(hdr));
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ switch (action) {
+ case BF_SOUNDING_ON: {
+ struct bf_sounding_on *req_snd_on;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_snd_on));
+ req_snd_on = (struct bf_sounding_on *)tlv;
+ req_snd_on->snd_mode = BF_PROCESSING;
+ break;
+ }
+ case BF_HW_EN_UPDATE: {
+ struct bf_hw_en_status_update *req_hw_en;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_hw_en));
+ req_hw_en = (struct bf_hw_en_status_update *)tlv;
+ req_hw_en->ebf = true;
+ req_hw_en->ibf = dev->ibf;
+ break;
+ }
+ case BF_MOD_EN_CTRL: {
+ struct bf_mod_en_ctrl *req_mod_en;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
+ req_mod_en = (struct bf_mod_en_ctrl *)tlv;
+ req_mod_en->bf_num = 2;
+ req_mod_en->bf_bitmap = GENMASK(0, 0);
+ break;
+ }
+ default:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WM_UNI_CMD(BF), true);
+}
+
+static int
+mt7996_mcu_enable_obss_spr(struct mt7996_phy *phy, u16 action, u8 val)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct {
+ u8 band_idx;
+ u8 __rsv[3];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 val;
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = cpu_to_le16(action),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .val = cpu_to_le32(val),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(SR),
+ &req, sizeof(req), true);
+}
+
+static int
+mt7996_mcu_set_obss_spr_pd(struct mt7996_phy *phy,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7996_dev *dev = phy->dev;
+ u8 max_th = 82, non_srg_max_th = 62;
+ struct {
+ u8 band_idx;
+ u8 __rsv[3];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 pd_th_non_srg;
+ u8 pd_th_srg;
+ u8 period_offs;
+ u8 rcpi_src;
+ __le16 obss_pd_min;
+ __le16 obss_pd_min_srg;
+ u8 resp_txpwr_mode;
+ u8 txpwr_restrict_mode;
+ u8 txpwr_ref;
+ u8 __rsv2[3];
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = cpu_to_le16(UNI_CMD_SR_SET_PARAM),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .obss_pd_min = cpu_to_le16(max_th),
+ .obss_pd_min_srg = cpu_to_le16(max_th),
+ .txpwr_restrict_mode = 2,
+ .txpwr_ref = 21
+ };
+ int ret;
+
+ /* disable firmware dynamic PD adjustment */
+ ret = mt7996_mcu_enable_obss_spr(phy, UNI_CMD_SR_ENABLE_DPD, false);
+ if (ret)
+ return ret;
+
+ if (he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED)
+ req.pd_th_non_srg = max_th;
+ else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ req.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset;
+ else
+ req.pd_th_non_srg = non_srg_max_th;
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+ req.pd_th_srg = max_th - he_obss_pd->max_offset;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(SR),
+ &req, sizeof(req), true);
+}
+
+static int
+mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_dev *dev = phy->dev;
+ u8 omac = mvif->mt76.omac_idx;
+ struct {
+ u8 band_idx;
+ u8 __rsv[3];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 omac;
+ u8 __rsv2[3];
+ u8 flag[20];
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = cpu_to_le16(UNI_CMD_SR_SET_SIGA),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .omac = omac > HW_BSSID_MAX ? omac - 12 : omac,
+ };
+ int ret;
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED)
+ req.flag[req.omac] = 0xf;
+ else
+ return 0;
+
+ /* switch to normal AP mode */
+ ret = mt7996_mcu_enable_obss_spr(phy, UNI_CMD_SR_ENABLE_MODE, 0);
+ if (ret)
+ return ret;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(SR),
+ &req, sizeof(req), true);
+}
+
+static int
+mt7996_mcu_set_obss_spr_bitmap(struct mt7996_phy *phy,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7996_dev *dev = phy->dev;
+ struct {
+ u8 band_idx;
+ u8 __rsv[3];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 color_l[2];
+ __le32 color_h[2];
+ __le32 bssid_l[2];
+ __le32 bssid_h[2];
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = cpu_to_le16(UNI_CMD_SR_SET_SRG_BITMAP),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+ u32 bitmap;
+
+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
+ req.color_l[req.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap));
+ req.color_h[req.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
+ req.bssid_l[req.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap));
+ req.bssid_h[req.band_idx] = cpu_to_le32(bitmap);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(SR), &req,
+ sizeof(req), true);
+}
+
+int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ int ret;
+
+ /* enable firmware scene detection algorithms */
+ ret = mt7996_mcu_enable_obss_spr(phy, UNI_CMD_SR_ENABLE_SD,
+ sr_scene_detect);
+ if (ret)
+ return ret;
+
+ /* firmware dynamically adjusts PD threshold so skip manual control */
+ if (sr_scene_detect && !he_obss_pd->enable)
+ return 0;
+
+ /* enable spatial reuse */
+ ret = mt7996_mcu_enable_obss_spr(phy, UNI_CMD_SR_ENABLE,
+ he_obss_pd->enable);
+ if (ret)
+ return ret;
+
+ if (sr_scene_detect || !he_obss_pd->enable)
+ return 0;
+
+ ret = mt7996_mcu_enable_obss_spr(phy, UNI_CMD_SR_ENABLE_TX, true);
+ if (ret)
+ return ret;
+
+ /* set SRG/non-SRG OBSS PD threshold */
+ ret = mt7996_mcu_set_obss_spr_pd(phy, he_obss_pd);
+ if (ret)
+ return ret;
+
+ /* set SR prohibit */
+ ret = mt7996_mcu_set_obss_spr_siga(phy, vif, he_obss_pd);
+ if (ret)
+ return ret;
+
+ /* set SRG BSS color/BSSID bitmap */
+ return mt7996_mcu_set_obss_spr_bitmap(phy, he_obss_pd);
+}
+
+int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color)
+{
+ int len = sizeof(struct bss_req_hdr) + sizeof(struct bss_color_tlv);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct bss_color_tlv *bss_color;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BSS_COLOR,
+ sizeof(*bss_color));
+ bss_color = (struct bss_color_tlv *)tlv;
+ bss_color->enable = he_bss_color->enabled;
+ bss_color->color = he_bss_color->color;
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
+#define TWT_AGRT_TRIGGER BIT(0)
+#define TWT_AGRT_ANNOUNCE BIT(1)
+#define TWT_AGRT_PROTECT BIT(2)
+
+int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ struct mt7996_vif *mvif,
+ struct mt7996_twt_flow *flow,
+ int cmd)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 tbl_idx;
+ u8 cmd;
+ u8 own_mac_idx;
+ u8 flowid; /* 0xff for group id */
+ __le16 peer_id; /* specify the peer_id (msb=0)
+ * or group_id (msb=1)
+ */
+ u8 duration; /* 256 us */
+ u8 bss_idx;
+ __le64 start_tsf;
+ __le16 mantissa;
+ u8 exponent;
+ u8 is_ap;
+ u8 agrt_params;
+ u8 __rsv2[135];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_CMD_TWT_ARGT_UPDATE),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .tbl_idx = flow->table_id,
+ .cmd = cmd,
+ .own_mac_idx = mvif->mt76.omac_idx,
+ .flowid = flow->id,
+ .peer_id = cpu_to_le16(flow->wcid),
+ .duration = flow->duration,
+ .bss_idx = mvif->mt76.idx,
+ .start_tsf = cpu_to_le64(flow->tsf),
+ .mantissa = flow->mantissa,
+ .exponent = flow->exp,
+ .is_ap = true,
+ };
+
+ if (flow->protection)
+ req.agrt_params |= TWT_AGRT_PROTECT;
+ if (!flow->flowtype)
+ req.agrt_params |= TWT_AGRT_ANNOUNCE;
+ if (flow->trigger)
+ req.agrt_params |= TWT_AGRT_TRIGGER;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(TWT),
+ &req, sizeof(req), true);
+}
+
+void mt7996_mcu_set_pm(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+#define EXIT_PM_STATE 0
+#define ENTER_PM_STATE 1
+ struct ieee80211_hw *hw = priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct bss_power_save *ps;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+ bool running = test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7996_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return;
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_PS, sizeof(*ps));
+ ps = (struct bss_power_save *)tlv;
+ ps->profile = running ? EXIT_PM_STATE : ENTER_PM_STATE;
+
+ mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
+int mt7996_mcu_set_rts_thresh(struct mt7996_phy *phy, u32 val)
+{
+ struct {
+ u8 band_idx;
+ u8 _rsv[3];
+
+ __le16 tag;
+ __le16 len;
+ __le32 len_thresh;
+ __le32 pkt_thresh;
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = cpu_to_le16(UNI_BAND_CONFIG_RTS_THRESHOLD),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .len_thresh = cpu_to_le32(val),
+ .pkt_thresh = cpu_to_le32(0x2),
+ };
+
+ return mt76_mcu_send_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(BAND_CONFIG),
+ &req, sizeof(req), true);
+}
+
+int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable)
+{
+ struct {
+ u8 band_idx;
+ u8 _rsv[3];
+
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 _rsv2[3];
+ } __packed req = {
+ .band_idx = phy->mt76->band_idx,
+ .tag = cpu_to_le16(UNI_BAND_CONFIG_RADIO_ENABLE),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .enable = enable,
+ };
+
+ return mt76_mcu_send_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(BAND_CONFIG),
+ &req, sizeof(req), true);
+}
+
+int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_RDD_CTRL_PARM),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RDD_CTRL),
+ &req, sizeof(req), true);
+}
+
+int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta;
+ struct sk_buff *skb;
+
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid,
+ MT7996_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* starec hdr trans */
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
+}
+
+int mt7996_mcu_rf_regval(struct mt7996_dev *dev, u32 regidx, u32 *val, bool set)
+{
+ struct {
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+ __le16 idx;
+ u8 __rsv2[2];
+ __le32 ofs;
+ __le32 data;
+ } __packed *res, req = {
+ .tag = cpu_to_le16(UNI_CMD_ACCESS_RF_REG_BASIC),
+ .len = cpu_to_le16(sizeof(req) - 4),
+
+ .idx = cpu_to_le16(u32_get_bits(regidx, GENMASK(31, 24))),
+ .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(23, 0))),
+ .data = set ? cpu_to_le32(*val) : 0,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ if (set)
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(REG_ACCESS),
+ &req, sizeof(req), true);
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(REG_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (void *)skb->data;
+ *val = le32_to_cpu(res->data);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val)
+{
+ struct {
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+
+ union {
+ struct {
+ u8 type;
+ u8 __rsv2[3];
+ } __packed platform_type;
+ struct {
+ u8 type;
+ u8 dest;
+ u8 __rsv2[2];
+ } __packed bypass_mode;
+ struct {
+ u8 path;
+ u8 __rsv2[3];
+ } __packed txfree_path;
+ };
+ } __packed req = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+
+ switch (tag) {
+ case UNI_RRO_SET_PLATFORM_TYPE:
+ req.platform_type.type = val;
+ break;
+ case UNI_RRO_SET_BYPASS_MODE:
+ req.bypass_mode.type = val;
+ break;
+ case UNI_RRO_SET_TXFREE_PATH:
+ req.txfree_path.path = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RRO), &req,
+ sizeof(req), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
new file mode 100644
index 000000000000..6084b2337598
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -0,0 +1,669 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#ifndef __MT7996_MCU_H
+#define __MT7996_MCU_H
+
+#include "../mt76_connac_mcu.h"
+
+struct mt7996_mcu_rxd {
+ __le32 rxd[8];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ u8 option;
+ u8 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[2];
+ u8 s2d_index;
+};
+
+struct mt7996_mcu_uni_event {
+ u8 cid;
+ u8 __rsv[3];
+ __le32 status; /* 0: success, others: fail */
+} __packed;
+
+struct mt7996_mcu_csa_notify {
+ struct mt7996_mcu_rxd rxd;
+
+ u8 omac_idx;
+ u8 csa_count;
+ u8 band_idx;
+ u8 rsv;
+} __packed;
+
+struct mt7996_mcu_rdd_report {
+ struct mt7996_mcu_rxd rxd;
+
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 band_idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 __rsv2;
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+ __le32 out_pri_stg_dmin;
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ u8 mdrdy_flag;
+ u8 tx_active;
+ } hw_pulse[32];
+} __packed;
+
+struct mt7996_mcu_background_chain_ctrl {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 chan; /* primary channel */
+ u8 central_chan; /* central channel */
+ u8 bw;
+ u8 tx_stream;
+ u8 rx_stream;
+
+ u8 monitor_chan; /* monitor channel */
+ u8 monitor_central_chan;/* monitor central channel */
+ u8 monitor_bw;
+ u8 monitor_tx_stream;
+ u8 monitor_rx_stream;
+
+ u8 scan_mode; /* 0: ScanStop
+ * 1: ScanStart
+ * 2: ScanRunning
+ */
+ u8 band_idx; /* DBDC */
+ u8 monitor_scan_type;
+ u8 band; /* 0: 2.4GHz, 1: 5GHz */
+ u8 rsv[2];
+} __packed;
+
+struct mt7996_mcu_eeprom {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 buffer_mode;
+ u8 format;
+ __le16 buf_len;
+} __packed;
+
+struct mt7996_mcu_phy_rx_info {
+ u8 category;
+ u8 rate;
+ u8 mode;
+ u8 nsts;
+ u8 gi;
+ u8 coding;
+ u8 stbc;
+ u8 bw;
+};
+
+struct mt7996_mcu_mib {
+ __le16 tag;
+ __le16 len;
+ __le32 offs;
+ __le64 data;
+} __packed;
+
+enum mt7996_chan_mib_offs {
+ UNI_MIB_OBSS_AIRTIME = 26,
+ UNI_MIB_NON_WIFI_TIME = 27,
+ UNI_MIB_TX_TIME = 28,
+ UNI_MIB_RX_TIME = 29
+};
+
+struct edca {
+ __le16 tag;
+ __le16 len;
+
+ u8 queue;
+ u8 set;
+ u8 cw_min;
+ u8 cw_max;
+ __le16 txop;
+ u8 aifs;
+ u8 __rsv;
+};
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+enum {
+ MCU_FW_LOG_WM,
+ MCU_FW_LOG_WA,
+ MCU_FW_LOG_TO_HOST,
+ MCU_FW_LOG_RELAY = 16
+};
+
+enum {
+ MCU_TWT_AGRT_ADD,
+ MCU_TWT_AGRT_MODIFY,
+ MCU_TWT_AGRT_DELETE,
+ MCU_TWT_AGRT_TEARDOWN,
+ MCU_TWT_AGRT_GET_TSF,
+};
+
+enum {
+ MCU_WA_PARAM_CMD_QUERY,
+ MCU_WA_PARAM_CMD_SET,
+ MCU_WA_PARAM_CMD_CAPABILITY,
+ MCU_WA_PARAM_CMD_DEBUG,
+};
+
+enum {
+ MCU_WA_PARAM_PDMA_RX = 0x04,
+ MCU_WA_PARAM_CPU_UTIL = 0x0b,
+ MCU_WA_PARAM_RED = 0x0e,
+ MCU_WA_PARAM_HW_PATH_HIF_VER = 0x2f,
+};
+
+enum mcu_mmps_mode {
+ MCU_MMPS_STATIC,
+ MCU_MMPS_DYNAMIC,
+ MCU_MMPS_RSV,
+ MCU_MMPS_DISABLE,
+};
+
+struct bss_rate_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 __rsv1[4];
+ __le16 bc_trans;
+ __le16 mc_trans;
+ u8 short_preamble;
+ u8 bc_fixed_rate;
+ u8 mc_fixed_rate;
+ u8 __rsv2[1];
+} __packed;
+
+struct bss_ra_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 short_preamble;
+ u8 force_sgi;
+ u8 force_gf;
+ u8 ht_mode;
+ u8 se_off;
+ u8 antenna_idx;
+ __le16 max_phyrate;
+ u8 force_tx_streams;
+ u8 __rsv[3];
+} __packed;
+
+struct bss_rlm_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 control_channel;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 ht_op_info;
+ u8 sco;
+ u8 band;
+ u8 __rsv[3];
+} __packed;
+
+struct bss_color_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 color;
+ u8 rsv[2];
+} __packed;
+
+struct bss_inband_discovery_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 tx_type;
+ u8 tx_mode;
+ u8 tx_interval;
+ u8 enable;
+ __le16 wcid;
+ __le16 prob_rsp_len;
+#define MAX_INBAND_FRAME_SIZE 512
+ u8 pkt[MAX_INBAND_FRAME_SIZE];
+} __packed;
+
+struct bss_bcn_content_tlv {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ie_pos;
+ __le16 csa_ie_pos;
+ __le16 bcc_ie_pos;
+ u8 enable;
+ u8 type;
+ __le16 pkt_len;
+#define MAX_BEACON_SIZE 512
+ u8 pkt[MAX_BEACON_SIZE];
+} __packed;
+
+struct bss_bcn_cntdwn_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 cnt;
+ u8 rsv[3];
+} __packed;
+
+struct bss_bcn_mbss_tlv {
+ __le16 tag;
+ __le16 len;
+ __le32 bitmap;
+#define MAX_BEACON_NUM 32
+ __le16 offset[MAX_BEACON_NUM];
+} __packed __aligned(4);
+
+struct bss_txcmd_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 txcmd_mode;
+ u8 __rsv[3];
+} __packed;
+
+struct bss_sec_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 __rsv1[2];
+ u8 cipher;
+ u8 __rsv2[1];
+} __packed;
+
+struct bss_power_save {
+ __le16 tag;
+ __le16 len;
+ u8 profile;
+ u8 _rsv[3];
+} __packed;
+
+struct bss_mld_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 group_mld_id;
+ u8 own_mld_id;
+ u8 mac_addr[ETH_ALEN];
+ u8 remap_idx;
+ u8 __rsv[3];
+} __packed;
+
+struct sta_rec_ba_uni {
+ __le16 tag;
+ __le16 len;
+ u8 tid;
+ u8 ba_type;
+ u8 amsdu;
+ u8 ba_en;
+ __le16 ssn;
+ __le16 winsize;
+ u8 ba_rdd_rro;
+ u8 __rsv[3];
+} __packed;
+
+struct sec_key_uni {
+ __le16 wlan_idx;
+ u8 mgmt_prot;
+ u8 cipher_id;
+ u8 cipher_len;
+ u8 key_id;
+ u8 key_len;
+ u8 need_resp;
+ u8 key[32];
+} __packed;
+
+struct sta_rec_sec_uni {
+ __le16 tag;
+ __le16 len;
+ u8 add;
+ u8 n_cipher;
+ u8 rsv[2];
+
+ struct sec_key_uni key[2];
+} __packed;
+
+struct sta_rec_hdrt {
+ __le16 tag;
+ __le16 len;
+ u8 hdrt_mode;
+ u8 rsv[3];
+} __packed;
+
+struct sta_rec_hdr_trans {
+ __le16 tag;
+ __le16 len;
+ u8 from_ds;
+ u8 to_ds;
+ u8 dis_rx_hdr_tran;
+ u8 rsv;
+} __packed;
+
+struct hdr_trans_en {
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 check_bssid;
+ u8 mode;
+ u8 __rsv;
+} __packed;
+
+struct hdr_trans_vlan {
+ __le16 tag;
+ __le16 len;
+ u8 insert_vlan;
+ u8 remove_vlan;
+ u8 tid;
+ u8 __rsv;
+} __packed;
+
+struct hdr_trans_blacklist {
+ __le16 tag;
+ __le16 len;
+ u8 idx;
+ u8 enable;
+ __le16 type;
+} __packed;
+
+struct uni_header {
+ u8 __rsv[4];
+} __packed;
+
+struct vow_rx_airtime {
+ __le16 tag;
+ __le16 len;
+
+ u8 enable;
+ u8 band;
+ u8 __rsv[2];
+} __packed;
+
+struct bf_sounding_on {
+ __le16 tag;
+ __le16 len;
+
+ u8 snd_mode;
+ u8 sta_num;
+ u8 __rsv[2];
+ __le16 wlan_id[4];
+ __le32 snd_period;
+} __packed;
+
+struct bf_hw_en_status_update {
+ __le16 tag;
+ __le16 len;
+
+ bool ebf;
+ bool ibf;
+ u8 __rsv[2];
+} __packed;
+
+struct bf_mod_en_ctrl {
+ __le16 tag;
+ __le16 len;
+
+ u8 bf_num;
+ u8 bf_bitmap;
+ u8 bf_sel[8];
+ u8 __rsv[2];
+} __packed;
+
+union bf_tag_tlv {
+ struct bf_sounding_on bf_snd;
+ struct bf_hw_en_status_update bf_hw_en;
+ struct bf_mod_en_ctrl bf_mod_en;
+};
+
+struct ra_rate {
+ __le16 wlan_idx;
+ u8 mode;
+ u8 stbc;
+ __le16 gi;
+ u8 bw;
+ u8 ldpc;
+ u8 mcs;
+ u8 nss;
+ __le16 ltf;
+ u8 spe;
+ u8 preamble;
+ u8 __rsv[2];
+} __packed;
+
+struct ra_fixed_rate {
+ __le16 tag;
+ __le16 len;
+
+ __le16 version;
+ struct ra_rate rate;
+} __packed;
+
+enum {
+ UNI_RA_FIXED_RATE = 0xf,
+};
+
+#define MT7996_HDR_TRANS_MAX_SIZE (sizeof(struct hdr_trans_en) + \
+ sizeof(struct hdr_trans_vlan) + \
+ sizeof(struct hdr_trans_blacklist))
+
+enum {
+ UNI_HDR_TRANS_EN,
+ UNI_HDR_TRANS_VLAN,
+ UNI_HDR_TRANS_BLACKLIST,
+};
+
+enum {
+ RATE_PARAM_FIXED = 3,
+ RATE_PARAM_MMPS_UPDATE = 5,
+ RATE_PARAM_FIXED_HE_LTF = 7,
+ RATE_PARAM_FIXED_MCS,
+ RATE_PARAM_FIXED_GI = 11,
+ RATE_PARAM_AUTO = 20,
+};
+
+enum {
+ BF_SOUNDING_ON = 1,
+ BF_HW_EN_UPDATE = 17,
+ BF_MOD_EN_CTRL = 20,
+};
+
+enum {
+ CMD_BAND_NONE,
+ CMD_BAND_24G,
+ CMD_BAND_5G,
+ CMD_BAND_6G,
+};
+
+struct bss_req_hdr {
+ u8 bss_idx;
+ u8 __rsv[3];
+} __packed;
+
+enum {
+ UNI_CHANNEL_SWITCH,
+ UNI_CHANNEL_RX_PATH,
+};
+
+#define MT7996_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \
+ sizeof(struct mt76_connac_bss_basic_tlv) + \
+ sizeof(struct bss_rlm_tlv) + \
+ sizeof(struct bss_ra_tlv) + \
+ sizeof(struct bss_info_uni_he) + \
+ sizeof(struct bss_rate_tlv) + \
+ sizeof(struct bss_txcmd_tlv) + \
+ sizeof(struct bss_power_save) + \
+ sizeof(struct bss_sec_tlv) + \
+ sizeof(struct bss_mld_tlv))
+
+#define MT7996_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_bf) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_he_v2) + \
+ sizeof(struct sta_rec_ba_uni) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct sta_rec_uapsd) + \
+ sizeof(struct sta_rec_amsdu) + \
+ sizeof(struct sta_rec_bfee) + \
+ sizeof(struct sta_rec_phy) + \
+ sizeof(struct sta_rec_ra) + \
+ sizeof(struct sta_rec_sec) + \
+ sizeof(struct sta_rec_ra_fixed) + \
+ sizeof(struct sta_rec_he_6g_capa) + \
+ sizeof(struct sta_rec_hdrt) + \
+ sizeof(struct sta_rec_hdr_trans) + \
+ sizeof(struct tlv))
+
+#define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
+ sizeof(struct bss_bcn_content_tlv) + \
+ sizeof(struct bss_bcn_cntdwn_tlv) + \
+ sizeof(struct bss_bcn_mbss_tlv))
+
+#define MT7996_INBAND_FRAME_SIZE (sizeof(struct bss_req_hdr) + \
+ sizeof(struct bss_inband_discovery_tlv))
+
+enum {
+ UNI_BAND_CONFIG_RADIO_ENABLE,
+ UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08,
+};
+
+enum {
+ UNI_WSYS_CONFIG_FW_LOG_CTRL,
+ UNI_WSYS_CONFIG_FW_DBG_CTRL,
+};
+
+enum {
+ UNI_RDD_CTRL_PARM,
+ UNI_RDD_CTRL_SET_TH = 0x3,
+};
+
+enum {
+ UNI_EFUSE_ACCESS = 1,
+ UNI_EFUSE_BUFFER_MODE,
+ UNI_EFUSE_FREE_BLOCK,
+ UNI_EFUSE_BUFFER_RD,
+};
+
+enum {
+ UNI_VOW_DRR_CTRL,
+ UNI_VOW_RX_AT_AIRTIME_EN = 0x0b,
+ UNI_VOW_RX_AT_AIRTIME_CLR_EN = 0x0e,
+};
+
+enum {
+ UNI_CMD_MIB_DATA,
+};
+
+enum {
+ UNI_POWER_OFF,
+};
+
+enum {
+ UNI_CMD_TWT_ARGT_UPDATE = 0x0,
+ UNI_CMD_TWT_MGMT_OFFLOAD,
+};
+
+enum {
+ UNI_RRO_DEL_ENTRY = 0x1,
+ UNI_RRO_SET_PLATFORM_TYPE,
+ UNI_RRO_GET_BA_SESSION_TABLE,
+ UNI_RRO_SET_BYPASS_MODE,
+ UNI_RRO_SET_TXFREE_PATH,
+};
+
+enum {
+ UNI_CMD_SR_ENABLE = 0x1,
+ UNI_CMD_SR_ENABLE_SD,
+ UNI_CMD_SR_ENABLE_MODE,
+ UNI_CMD_SR_ENABLE_DPD = 0x12,
+ UNI_CMD_SR_ENABLE_TX,
+ UNI_CMD_SR_SET_SRG_BITMAP = 0x80,
+ UNI_CMD_SR_SET_PARAM = 0xc1,
+ UNI_CMD_SR_SET_SIGA = 0xd0,
+};
+
+enum {
+ UNI_CMD_ACCESS_REG_BASIC = 0x0,
+ UNI_CMD_ACCESS_RF_REG_BASIC,
+};
+
+enum {
+ UNI_CMD_SER_QUERY = 0x0,
+ UNI_CMD_SER_SET = 0x2,
+ UNI_CMD_SER_TRIGGER = 0x3,
+};
+
+enum {
+ SER_QUERY,
+ /* recovery */
+ SER_SET_RECOVER_L1,
+ SER_SET_RECOVER_L2,
+ SER_SET_RECOVER_L3_RX_ABORT,
+ SER_SET_RECOVER_L3_TX_ABORT,
+ SER_SET_RECOVER_L3_TX_DISABLE,
+ SER_SET_RECOVER_L3_BF,
+ /* action */
+ SER_ENABLE = 2,
+ SER_RECOVER
+};
+
+enum {
+ MT7996_SEC_MODE_PLAIN,
+ MT7996_SEC_MODE_AES,
+ MT7996_SEC_MODE_SCRAMBLE,
+ MT7996_SEC_MODE_MAX,
+};
+
+#define MT7996_PATCH_SEC GENMASK(31, 24)
+#define MT7996_PATCH_SCRAMBLE_KEY GENMASK(15, 8)
+#define MT7996_PATCH_AES_KEY GENMASK(7, 0)
+
+#define MT7996_SEC_ENCRYPT BIT(0)
+#define MT7996_SEC_KEY_IDX GENMASK(2, 1)
+#define MT7996_SEC_IV BIT(3)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
new file mode 100644
index 000000000000..521769eb6b0e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7996.h"
+#include "mac.h"
+#include "../trace.h"
+
+static const struct __base mt7996_reg_base[] = {
+ [WF_AGG_BASE] = { { 0x820e2000, 0x820f2000, 0x830e2000 } },
+ [WF_ARB_BASE] = { { 0x820e3000, 0x820f3000, 0x830e3000 } },
+ [WF_TMAC_BASE] = { { 0x820e4000, 0x820f4000, 0x830e4000 } },
+ [WF_RMAC_BASE] = { { 0x820e5000, 0x820f5000, 0x830e5000 } },
+ [WF_DMA_BASE] = { { 0x820e7000, 0x820f7000, 0x830e7000 } },
+ [WF_WTBLOFF_BASE] = { { 0x820e9000, 0x820f9000, 0x830e9000 } },
+ [WF_ETBF_BASE] = { { 0x820ea000, 0x820fa000, 0x830ea000 } },
+ [WF_LPON_BASE] = { { 0x820eb000, 0x820fb000, 0x830eb000 } },
+ [WF_MIB_BASE] = { { 0x820ed000, 0x820fd000, 0x830ed000 } },
+};
+
+static const struct __map mt7996_reg_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x1000 }, /* WFDMA reserved */
+ { 0x57000000, 0x05000, 0x1000 }, /* WFDMA MCU wrap CR */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
+ { 0x74030000, 0x10000, 0x1000 }, /* PCIe MAC */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820cc000, 0xa5000, 0x2000 }, /* WF_LMAC_TOP BN1 (WF_MUCOP) */
+ { 0x820c4000, 0xa8000, 0x4000 }, /* WF_LMAC_TOP BN1 (WF_MUCOP) */
+ { 0x820b0000, 0xae000, 0x1000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, wfdma */
+ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x0, 0x0, 0x0 }, /* end of search table */
+};
+
+static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+
+ dev->reg_l1_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
+ MT_HIF_REMAP_L1_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
+
+ return MT_HIF_REMAP_BASE_L1 + offset;
+}
+
+static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ dev->reg_l2_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
+ MT_HIF_REMAP_L2_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+
+ return MT_HIF_REMAP_BASE_L2 + offset;
+}
+
+static void mt7996_reg_remap_restore(struct mt7996_dev *dev)
+{
+ /* restore the original remap state */
+ if (unlikely(dev->reg_l1_backup)) {
+ dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->reg_l1_backup);
+ dev->reg_l1_backup = 0;
+ }
+
+ if (dev->reg_l2_backup) {
+ dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->reg_l2_backup);
+ dev->reg_l2_backup = 0;
+ }
+}
+
+static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
+{
+ int i;
+
+ mt7996_reg_remap_restore(dev);
+
+ if (addr < 0x100000)
+ return addr;
+
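+ /* look up the fixed bus address map before falling back to L1/L2 remapping */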
+ for (i = 0; i < dev->reg.map_size; i++) {
+ u32 ofs;
+
+ if (addr < dev->reg.map[i].phys)
+ continue;
+
+ ofs = addr - dev->reg.map[i].phys;
+ if (ofs > dev->reg.map[i].size)
+ continue;
+
+ return dev->reg.map[i].mapped + ofs;
+ }
+
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
+ return mt7996_reg_map_l1(dev, addr);
+
+ if (dev_is_pci(dev->mt76.dev) &&
+ ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ return mt7996_reg_map_l1(dev, addr);
+
+ /* CONN_INFRA: convert to physical addr and use layer 1 remap */
+ if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
+ addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
+ return mt7996_reg_map_l1(dev, addr);
+ }
+
+ return mt7996_reg_map_l2(dev, addr);
+}
+
+static u32 mt7996_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+
+ return dev->bus_ops->rr(mdev, __mt7996_reg_addr(dev, offset));
+}
+
+static void mt7996_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+
+ dev->bus_ops->wr(mdev, __mt7996_reg_addr(dev, offset), val);
+}
+
+static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+
+ return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
+}
+
+static int mt7996_mmio_init(struct mt76_dev *mdev,
+ void __iomem *mem_base,
+ u32 device_id)
+{
+ struct mt76_bus_ops *bus_ops;
+ struct mt7996_dev *dev;
+
+ dev = container_of(mdev, struct mt7996_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
+
+ switch (device_id) {
+ case 0x7990:
+ dev->reg.base = mt7996_reg_base;
+ dev->reg.map = mt7996_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
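+ /* wrap the default bus ops so register accesses go through the address remap */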
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7996_rr;
+ bus_ops->wr = mt7996_wr;
+ bus_ops->rmw = mt7996_rmw;
+ dev->mt76.bus = bus_ops;
+
+ mdev->rev = (device_id << 16) | (mt76_rr(dev, MT_HW_REV) & 0xff);
+
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ return 0;
+}
+
+void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
+ u32 clear, u32 set)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
+
+ mdev->mmio.irqmask &= ~clear;
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+ spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
+}
+
+static void mt7996_rx_poll_complete(struct mt76_dev *mdev,
+ enum mt76_rxq_id q)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+
+ mt7996_irq_enable(dev, MT_INT_RX(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static void mt7996_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mt7996_dev *dev = from_tasklet(dev, t, irq_tasklet);
+ u32 i, intr, mask, intr1;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+
+ intr |= intr1;
+ }
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
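+ /* mask the pending sources and kick the NAPI handlers */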
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+ mt7996_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ if ((intr & MT_INT_RX(i)))
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+}
+
+irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7996_dev *dev = dev_instance;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7996_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE |
+ MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7996_TOKEN_SIZE,
+ .tx_prepare_skb = mt7996_tx_prepare_skb,
+ .tx_complete_skb = mt7996_tx_complete_skb,
+ .rx_skb = mt7996_queue_rx_skb,
+ .rx_check = mt7996_rx_check,
+ .rx_poll_complete = mt7996_rx_poll_complete,
+ .sta_ps = mt7996_sta_ps,
+ .sta_add = mt7996_mac_sta_add,
+ .sta_remove = mt7996_mac_sta_remove,
+ .update_survey = mt7996_update_channel,
+ };
+ struct mt7996_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7996_ops, &drv_ops);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = container_of(mdev, struct mt7996_dev, mt76);
+
+ ret = mt7996_mmio_init(mdev, mem_base, device_id);
+ if (ret)
+ goto error;
+
+ tasklet_setup(&dev->irq_tasklet, mt7996_irq_tasklet);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ return dev;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ERR_PTR(ret);
+}
+
+static int __init mt7996_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7996_hif_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&mt7996_pci_driver);
+ if (ret)
+ pci_unregister_driver(&mt7996_hif_driver);
+
+ return ret;
+}
+
+static void __exit mt7996_exit(void)
+{
+ pci_unregister_driver(&mt7996_pci_driver);
+ pci_unregister_driver(&mt7996_hif_driver);
+}
+
+module_init(mt7996_init);
+module_exit(mt7996_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
new file mode 100644
index 000000000000..725344791b4c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -0,0 +1,523 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#ifndef __MT7996_H
+#define __MT7996_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76_connac.h"
+#include "regs.h"
+
+#define MT7996_MAX_INTERFACES 19
+#define MT7996_MAX_WMM_SETS 4
+#define MT7996_WTBL_SIZE 544
+#define MT7996_WTBL_RESERVED (MT7996_WTBL_SIZE - 1)
+#define MT7996_WTBL_STA (MT7996_WTBL_RESERVED - \
+ MT7996_MAX_INTERFACES)
+
+#define MT7996_WATCHDOG_TIME (HZ / 10)
+#define MT7996_RESET_TIMEOUT (30 * HZ)
+
+#define MT7996_TX_RING_SIZE 2048
+#define MT7996_TX_MCU_RING_SIZE 256
+#define MT7996_TX_FWDL_RING_SIZE 128
+
+#define MT7996_RX_RING_SIZE 1536
+#define MT7996_RX_MCU_RING_SIZE 512
+
+#define MT7996_FIRMWARE_WA "mediatek/mt7996/mt7996_wa.bin"
+#define MT7996_FIRMWARE_WM "mediatek/mt7996/mt7996_wm.bin"
+#define MT7996_ROM_PATCH "mediatek/mt7996/mt7996_rom_patch.bin"
+
+#define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin"
+#define MT7996_EEPROM_SIZE 7680
+#define MT7996_EEPROM_BLOCK_SIZE 16
+#define MT7996_TOKEN_SIZE 8192
+
+#define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
+#define MT7996_MAX_TWT_AGRT 16
+#define MT7996_MAX_STA_TWT_AGRT 8
+#define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
+
+struct mt7996_vif;
+struct mt7996_sta;
+struct mt7996_dfs_pulse;
+struct mt7996_dfs_pattern;
+
+enum mt7996_txq_id {
+ MT7996_TXQ_FWDL = 16,
+ MT7996_TXQ_MCU_WM,
+ MT7996_TXQ_BAND0,
+ MT7996_TXQ_BAND1,
+ MT7996_TXQ_MCU_WA,
+ MT7996_TXQ_BAND2,
+};
+
+enum mt7996_rxq_id {
+ MT7996_RXQ_MCU_WM = 0,
+ MT7996_RXQ_MCU_WA,
+ MT7996_RXQ_MCU_WA_MAIN = 2,
+ MT7996_RXQ_MCU_WA_EXT = 2, /* unused */
+ MT7996_RXQ_MCU_WA_TRI = 3,
+ MT7996_RXQ_BAND0 = 4,
+ MT7996_RXQ_BAND1 = 4, /* unused */
+ MT7996_RXQ_BAND2 = 5,
+};
+
+struct mt7996_twt_flow {
+ struct list_head list;
+ u64 start_tsf;
+ u64 tsf;
+ u32 duration;
+ u16 wcid;
+ __le16 mantissa;
+ u8 exp;
+ u8 table_id;
+ u8 id;
+ u8 protection:1;
+ u8 flowtype:1;
+ u8 trigger:1;
+ u8 sched:1;
+};
+
+DECLARE_EWMA(avg_signal, 10, 8)
+
+struct mt7996_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7996_vif *vif;
+
+ struct list_head poll_list;
+ struct list_head rc_list;
+ u32 airtime_ac[8];
+
+ int ack_signal;
+ struct ewma_avg_signal avg_ack_signal;
+
+ unsigned long changed;
+ unsigned long jiffies;
+ unsigned long ampdu_state;
+
+ struct mt76_sta_stats stats;
+
+ struct mt76_connac_sta_key_conf bip;
+
+ struct {
+ u8 flowid_mask;
+ struct mt7996_twt_flow flow[MT7996_MAX_STA_TWT_AGRT];
+ } twt;
+};
+
+struct mt7996_vif_cap {
+ bool ht_ldpc:1;
+ bool vht_ldpc:1;
+ bool he_ldpc:1;
+ bool vht_su_ebfer:1;
+ bool vht_su_ebfee:1;
+ bool vht_mu_ebfer:1;
+ bool vht_mu_ebfee:1;
+ bool he_su_ebfer:1;
+ bool he_su_ebfee:1;
+ bool he_mu_ebfer:1;
+};
+
+struct mt7996_vif {
+ struct mt76_vif mt76; /* must be first */
+
+ struct mt7996_vif_cap cap;
+ struct mt7996_sta sta;
+ struct mt7996_phy *phy;
+
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct cfg80211_bitrate_mask bitrate_mask;
+};
+
+/* per-phy stats. */
+struct mib_stats {
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+ u32 ba_miss_cnt;
+ u32 tx_mu_bf_cnt;
+ u32 tx_mu_mpdu_cnt;
+ u32 tx_mu_acked_mpdu_cnt;
+ u32 tx_su_acked_mpdu_cnt;
+ u32 tx_bf_ibf_ppdu_cnt;
+ u32 tx_bf_ebf_ppdu_cnt;
+
+ u32 tx_bf_rx_fb_all_cnt;
+ u32 tx_bf_rx_fb_eht_cnt;
+ u32 tx_bf_rx_fb_he_cnt;
+ u32 tx_bf_rx_fb_vht_cnt;
+ u32 tx_bf_rx_fb_ht_cnt;
+
+ u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
+ u32 tx_bf_rx_fb_nc_cnt;
+ u32 tx_bf_rx_fb_nr_cnt;
+ u32 tx_bf_fb_cpl_cnt;
+ u32 tx_bf_fb_trig_cnt;
+
+ u32 tx_ampdu_cnt;
+ u32 tx_stop_q_empty_cnt;
+ u32 tx_mpdu_attempts_cnt;
+ u32 tx_mpdu_success_cnt;
+ /* BF counter is PPDU-based, so remove MPDU-based BF counter */
+
+ u32 tx_rwp_fail_cnt;
+ u32 tx_rwp_need_cnt;
+
+ /* rx stats */
+ u32 rx_fifo_full_cnt;
+ u32 channel_idle_cnt;
+ u32 rx_vector_mismatch_cnt;
+ u32 rx_delimiter_fail_cnt;
+ u32 rx_len_mismatch_cnt;
+ u32 rx_mpdu_cnt;
+ u32 rx_ampdu_cnt;
+ u32 rx_ampdu_bytes_cnt;
+ u32 rx_ampdu_valid_subframe_cnt;
+ u32 rx_ampdu_valid_subframe_bytes_cnt;
+ u32 rx_pfdrop_cnt;
+ u32 rx_vec_queue_overflow_drop_cnt;
+ u32 rx_ba_cnt;
+
+ u32 tx_amsdu[8];
+ u32 tx_amsdu_cnt;
+};
+
+struct mt7996_hif {
+ struct list_head list;
+
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+};
+
+struct mt7996_phy {
+ struct mt76_phy *mt76;
+ struct mt7996_dev *dev;
+
+ struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+
+ struct ieee80211_vif *monitor_vif;
+
+ u32 rxfilter;
+ u64 omac_mask;
+
+ u16 noise;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 rdd_state;
+
+ u32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+ struct mt76_channel_state state_ts;
+};
+
+struct mt7996_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ struct mt7996_hif *hif2;
+ struct mt7996_reg_desc reg;
+ u8 q_id[MT7996_MAX_QUEUE];
+ u32 q_int_mask[MT7996_MAX_QUEUE];
+ u32 q_wfdma_mask;
+
+ const struct mt76_bus_ops *bus_ops;
+ struct tasklet_struct irq_tasklet;
+ struct mt7996_phy phy;
+
+ /* monitor rx chain configured channel */
+ struct cfg80211_chan_def rdd2_chandef;
+ struct mt7996_phy *rdd2_phy;
+
+ u16 chainmask;
+ u8 chainshift[__MT_MAX_BAND];
+ u32 hif_idx;
+
+ struct work_struct init_work;
+ struct work_struct rc_work;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
+
+ struct list_head sta_rc_list;
+ struct list_head sta_poll_list;
+ struct list_head twt_list;
+ spinlock_t sta_poll_lock;
+
+ u32 hw_pattern;
+
+ bool dbdc_support:1;
+ bool tbtc_support:1;
+ bool flash_mode:1;
+
+ bool ibf;
+ u8 fw_debug_wm;
+ u8 fw_debug_wa;
+ u8 fw_debug_bin;
+ u16 fw_debug_seq;
+
+ struct dentry *debugfs_dir;
+ struct rchan *relay_fwlog;
+
+ struct {
+ u8 table_mask;
+ u8 n_agrt;
+ } twt;
+
+ u32 reg_l1_backup;
+ u32 reg_l2_backup;
+};
+
+enum {
+ WFDMA0 = 0x0,
+ WFDMA1,
+ WFDMA_EXT,
+ __MT_WFDMA_MAX,
+};
+
+enum {
+ MT_CTX0,
+ MT_HIF0 = 0x0,
+
+ MT_LMAC_AC00 = 0x0,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
+};
+
+enum {
+ MT_RX_SEL0,
+ MT_RX_SEL1,
+ MT_RX_SEL2, /* monitor chain */
+};
+
+enum mt7996_rdd_cmd {
+ RDD_STOP,
+ RDD_START,
+ RDD_DET_MODE,
+ RDD_RADAR_EMULATE,
+ RDD_START_TXQ = 20,
+ RDD_CAC_START = 50,
+ RDD_CAC_END,
+ RDD_NORMAL_START,
+ RDD_DISABLE_DFS_CAL,
+ RDD_PULSE_DBG,
+ RDD_READ_PULSE,
+ RDD_RESUME_BF,
+ RDD_IRQ_OFF,
+};
+
+static inline struct mt7996_phy *
+mt7996_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7996_dev *
+mt7996_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7996_dev, mt76);
+}
+
+static inline struct mt7996_phy *
+__mt7996_phy(struct mt7996_dev *dev, enum mt76_band_id band)
+{
+ struct mt76_phy *phy = dev->mt76.phys[band];
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
+
+static inline struct mt7996_phy *
+mt7996_phy2(struct mt7996_dev *dev)
+{
+ return __mt7996_phy(dev, MT_BAND1);
+}
+
+static inline struct mt7996_phy *
+mt7996_phy3(struct mt7996_dev *dev)
+{
+ return __mt7996_phy(dev, MT_BAND2);
+}
+
+extern const struct ieee80211_ops mt7996_ops;
+extern struct pci_driver mt7996_pci_driver;
+extern struct pci_driver mt7996_hif_driver;
+
+struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id);
+void mt7996_wfsys_reset(struct mt7996_dev *dev);
+irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
+u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif);
+int mt7996_register_device(struct mt7996_dev *dev);
+void mt7996_unregister_device(struct mt7996_dev *dev);
+int mt7996_eeprom_init(struct mt7996_dev *dev);
+int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy);
+int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
+ struct ieee80211_channel *chan);
+s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band);
+int mt7996_dma_init(struct mt7996_dev *dev);
+void mt7996_dma_prefetch(struct mt7996_dev *dev);
+void mt7996_dma_cleanup(struct mt7996_dev *dev);
+int mt7996_mcu_init(struct mt7996_dev *dev);
+int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ struct mt7996_vif *mvif,
+ struct mt7996_twt_flow *flow,
+ int cmd);
+int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
+ struct ieee80211_vif *vif, bool enable);
+int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
+ struct ieee80211_vif *vif, int enable);
+int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color);
+int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int enable);
+int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif, u32 changed);
+int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool changed);
+int mt7996_set_channel(struct mt7996_phy *phy);
+int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
+int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
+int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
+ void *data, u16 version);
+int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
+int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset);
+int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
+int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 set, u8 band);
+int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action);
+int mt7996_mcu_set_fcc5_lpn(struct mt7996_dev *dev, int val);
+int mt7996_mcu_set_pulse_th(struct mt7996_dev *dev,
+ const struct mt7996_dfs_pulse *pulse);
+int mt7996_mcu_set_radar_th(struct mt7996_dev *dev, int index,
+ const struct mt7996_dfs_pattern *pattern);
+int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable);
+void mt7996_mcu_set_pm(void *priv, u8 *mac, struct ieee80211_vif *vif);
+int mt7996_mcu_set_rts_thresh(struct mt7996_phy *phy, u32 val);
+int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch);
+int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val);
+int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
+ struct cfg80211_chan_def *chandef);
+int mt7996_mcu_rf_regval(struct mt7996_dev *dev, u32 regidx, u32 *val, bool set);
+int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans);
+int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val);
+int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
+int mt7996_mcu_fw_log_2_host(struct mt7996_dev *dev, u8 type, u8 ctrl);
+int mt7996_mcu_fw_dbg_ctrl(struct mt7996_dev *dev, u32 module, u8 level);
+void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb);
+void mt7996_mcu_exit(struct mt7996_dev *dev);
+
+void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
+ u32 clear, u32 set);
+
+static inline void mt7996_irq_enable(struct mt7996_dev *dev, u32 mask)
+{
+ if (dev->hif2)
+ mt7996_dual_hif_set_irq_mask(dev, false, 0, mask);
+ else
+ mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+ tasklet_schedule(&dev->irq_tasklet);
+}
+
+static inline void mt7996_irq_disable(struct mt7996_dev *dev, u32 mask)
+{
+ if (dev->hif2)
+ mt7996_dual_hif_set_irq_mask(dev, true, mask, 0);
+ else
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw);
+bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask);
+void mt7996_mac_reset_counters(struct mt7996_phy *phy);
+void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy);
+void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band);
+void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
+ struct ieee80211_key_conf *key, u32 changed);
+void mt7996_mac_set_timing(struct mt7996_phy *phy);
+int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7996_mac_work(struct work_struct *work);
+void mt7996_mac_reset_work(struct work_struct *work);
+void mt7996_mac_sta_rc_work(struct work_struct *work);
+void mt7996_mac_update_stats(struct mt7996_phy *phy);
+void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
+ struct mt7996_sta *msta,
+ u8 flowid);
+void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt);
+int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+void mt7996_tx_token_put(struct mt7996_dev *dev);
+void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb, u32 *info);
+bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
+void mt7996_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+void mt7996_stats_work(struct work_struct *work);
+int mt76_dfs_start_rdd(struct mt7996_dev *dev, bool force);
+int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy);
+void mt7996_set_stream_he_caps(struct mt7996_phy *phy);
+void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy);
+void mt7996_update_channel(struct mt76_phy *mphy);
+int mt7996_init_debugfs(struct mt7996_phy *phy);
+void mt7996_debugfs_rx_fw_monitor(struct mt7996_dev *dev, const void *data, int len);
+bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len);
+int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd);
+int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+#ifdef CONFIG_MAC80211_DEBUGFS
+void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
new file mode 100644
index 000000000000..64aee3fb5445
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7996.h"
+#include "mac.h"
+#include "../trace.h"
+
+static LIST_HEAD(hif_list);
+static DEFINE_SPINLOCK(hif_lock);
+static u32 hif_idx;
+
+static const struct pci_device_id mt7996_pci_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7990) },
+ { },
+};
+
+static const struct pci_device_id mt7996_hif_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7991) },
+ { },
+};
+
+static struct mt7996_hif *mt7996_pci_get_hif2(u32 idx)
+{
+ struct mt7996_hif *hif;
+ u32 val;
+
+ spin_lock_bh(&hif_lock);
+
+ list_for_each_entry(hif, &hif_list, list) {
+ val = readl(hif->regs + MT_PCIE_RECOG_ID);
+ val &= MT_PCIE_RECOG_ID_MASK;
+ if (val != idx)
+ continue;
+
+ get_device(hif->dev);
+ goto out;
+ }
+ hif = NULL;
+
+out:
+ spin_unlock_bh(&hif_lock);
+
+ return hif;
+}
+
+static void mt7996_put_hif2(struct mt7996_hif *hif)
+{
+ if (!hif)
+ return;
+
+ put_device(hif->dev);
+}
+
+static struct mt7996_hif *mt7996_pci_init_hif2(struct pci_dev *pdev)
+{
+ hif_idx++;
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL))
+ return NULL;
+
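+ /* write the recognition ID used to look up the matching hif2 device */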
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
+
+ return mt7996_pci_get_hif2(hif_idx);
+}
+
+static int mt7996_pci_hif2_probe(struct pci_dev *pdev)
+{
+ struct mt7996_hif *hif;
+
+ hif = devm_kzalloc(&pdev->dev, sizeof(*hif), GFP_KERNEL);
+ if (!hif)
+ return -ENOMEM;
+
+ hif->dev = &pdev->dev;
+ hif->regs = pcim_iomap_table(pdev)[0];
+ hif->irq = pdev->irq;
+ spin_lock_bh(&hif_lock);
+ list_add(&hif->list, &hif_list);
+ spin_unlock_bh(&hif_lock);
+ pci_set_drvdata(pdev, hif);
+
+ return 0;
+}
+
+static int mt7996_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pci_dev *hif2_dev;
+ struct mt7996_dev *dev;
+ struct mt76_dev *mdev;
+ struct mt7996_hif *hif2;
+ int irq, ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mt76_pci_disable_aspm(pdev);
+
+ if (id->device == 0x7991)
+ return mt7996_pci_hif2_probe(pdev);
+
+ dev = mt7996_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ id->device);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ mdev = &dev->mt76;
+ mt7996_wfsys_reset(dev);
+ hif2 = mt7996_pci_init_hif2(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_device;
+
+ irq = pdev->irq;
+ ret = devm_request_irq(mdev->dev, irq, mt7996_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto free_irq_vector;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ /* master switch of PCIe interrupt enable */
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+
+ if (hif2) {
+ hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
+ dev->hif2 = hif2;
+
+ ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_hif2;
+
+ dev->hif2->irq = hif2_dev->irq;
+ ret = devm_request_irq(mdev->dev, dev->hif2->irq,
+ mt7996_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME "-hif", dev);
+ if (ret)
+ goto free_hif2_irq_vector;
+
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ /* master switch of PCIe interrupt enable */
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ }
+
+ ret = mt7996_register_device(dev);
+ if (ret)
+ goto free_hif2_irq;
+
+ return 0;
+
+free_hif2_irq:
+ if (dev->hif2)
+ devm_free_irq(mdev->dev, dev->hif2->irq, dev);
+free_hif2_irq_vector:
+ if (dev->hif2)
+ pci_free_irq_vectors(hif2_dev);
+free_hif2:
+ if (dev->hif2)
+ put_device(dev->hif2->dev);
+ devm_free_irq(mdev->dev, irq, dev);
+free_irq_vector:
+ pci_free_irq_vectors(pdev);
+free_device:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7996_hif_remove(struct pci_dev *pdev)
+{
+ struct mt7996_hif *hif = pci_get_drvdata(pdev);
+
+ list_del(&hif->list);
+}
+
+static void mt7996_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev;
+ struct mt7996_dev *dev;
+
+ mdev = pci_get_drvdata(pdev);
+ dev = container_of(mdev, struct mt7996_dev, mt76);
+ mt7996_put_hif2(dev->hif2);
+ mt7996_unregister_device(dev);
+}
+
+struct pci_driver mt7996_hif_driver = {
+ .name = KBUILD_MODNAME "_hif",
+ .id_table = mt7996_hif_device_table,
+ .probe = mt7996_pci_probe,
+ .remove = mt7996_hif_remove,
+};
+
+struct pci_driver mt7996_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7996_pci_device_table,
+ .probe = mt7996_pci_probe,
+ .remove = mt7996_pci_remove,
+};
+
+MODULE_DEVICE_TABLE(pci, mt7996_pci_device_table);
+MODULE_DEVICE_TABLE(pci, mt7996_hif_device_table);
+MODULE_FIRMWARE(MT7996_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7996_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7996_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
new file mode 100644
index 000000000000..794f61b93a46
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -0,0 +1,542 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#ifndef __MT7996_REGS_H
+#define __MT7996_REGS_H
+
+struct __map {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+};
+
+struct __base {
+ u32 band_base[__MT_MAX_BAND];
+};
+
+/* used to differentiate between generations */
+struct mt7996_reg_desc {
+ const struct __base *base;
+ const struct __map *map;
+ u32 map_size;
+};
+
+enum base_rev {
+ WF_AGG_BASE,
+ WF_ARB_BASE,
+ WF_TMAC_BASE,
+ WF_RMAC_BASE,
+ WF_DMA_BASE,
+ WF_WTBLOFF_BASE,
+ WF_ETBF_BASE,
+ WF_LPON_BASE,
+ WF_MIB_BASE,
+ __MT_REG_BASE_MAX,
+};
+
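+/* Resolve a per-band register base from the generation-specific register map. */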
+#define __BASE(_id, _band) (dev->reg.base[(_id)].band_base[(_band)])
+
+#define MT_MCU_INT_EVENT 0x2108
+#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
+/* PLE */
+#define MT_PLE_BASE 0x820c0000
+#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+
+#define MT_FL_Q_EMPTY MT_PLE(0x360)
+#define MT_FL_Q0_CTRL MT_PLE(0x3e0)
+#define MT_FL_Q2_CTRL MT_PLE(0x3e8)
+#define MT_FL_Q3_CTRL MT_PLE(0x3ec)
+
+#define MT_PLE_FREEPG_CNT MT_PLE(0x380)
+#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x384)
+#define MT_PLE_PG_HIF_GROUP MT_PLE(0x00c)
+#define MT_PLE_HIF_PG_INFO MT_PLE(0x388)
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x600 + 0x80 * (ac) + ((n) << 2))
+#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
+
+/* WF MDP TOP */
+#define MT_MDP_BASE 0x820cc000
+#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
+
+#define MT_MDP_DCR2 MT_MDP(0x8e8)
+#define MT_MDP_DCR2_RX_TRANS_SHORT BIT(2)
+
+/* TMAC: band 0(0x820e4000), band 1(0x820f4000), band 2(0x830e4000) */
+#define MT_WF_TMAC_BASE(_band) __BASE(WF_TMAC_BASE, (_band))
+#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
+
+#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
+#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
+
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x0c8)
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x0cc)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x014)
+#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x018)
+#define MT_IFS_EIFS_CCK GENMASK(8, 0)
+
+/* WF DMA TOP: band 0(0x820e7000), band 1(0x820f7000), band 2(0x830e7000) */
+#define MT_WF_DMA_BASE(_band) __BASE(WF_DMA_BASE, (_band))
+#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
+
+#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
+#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+
+#define MT_DMA_TCRF1(_band) MT_WF_DMA(_band, 0x054)
+#define MT_DMA_TCRF1_QIDX GENMASK(15, 13)
+
+/* WTBLOFF TOP: band 0(0x820e9000), band 1(0x820f9000), band 2(0x830e9000) */
+#define MT_WTBLOFF_BASE(_band) __BASE(WF_WTBLOFF_BASE, (_band))
+#define MT_WTBLOFF(_band, ofs) (MT_WTBLOFF_BASE(_band) + (ofs))
+
+#define MT_WTBLOFF_RSCR(_band) MT_WTBLOFF(_band, 0x008)
+#define MT_WTBLOFF_RSCR_RCPI_MODE GENMASK(31, 30)
+#define MT_WTBLOFF_RSCR_RCPI_PARAM GENMASK(25, 24)
+
+/* ETBF: band 0(0x820ea000), band 1(0x820fa000), band 2(0x830ea000) */
+#define MT_WF_ETBF_BASE(_band) __BASE(WF_ETBF_BASE, (_band))
+#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
+
+#define MT_ETBF_RX_FB_CONT(_band) MT_WF_ETBF(_band, 0x100)
+#define MT_ETBF_RX_FB_BW GENMASK(10, 8)
+#define MT_ETBF_RX_FB_NC GENMASK(7, 4)
+#define MT_ETBF_RX_FB_NR GENMASK(3, 0)
+
+/* LPON: band 0(0x820eb000), band 1(0x820fb000), band 2(0x830eb000) */
+#define MT_WF_LPON_BASE(_band) __BASE(WF_LPON_BASE, (_band))
+#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
+
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x360)
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x364)
+#define MT_LPON_FRCR(_band) MT_WF_LPON(_band, 0x37c)
+
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (((n) * 4) << 4))
+#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
+#define MT_LPON_TCR_SW_WRITE BIT(0)
+#define MT_LPON_TCR_SW_ADJUST BIT(1)
+#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
+
+/* MIB: band 0(0x820ed000), band 1(0x820fd000), band 2(0x830ed000) */
+/* These counters are (mostly?) clear-on-read. So, some should not
+ * be read at all in case firmware is already reading them. These
+ * are commented with 'DNR' below. The DNR stats will be read by querying
+ * the firmware API for the appropriate message. For counters the driver
+ * does read, the driver should accumulate the counters.
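+ *
+ * Illustrative accumulation sketch (variable and field names hypothetical):
+ *   cnt = mt76_rr(dev, MT_MIB_RSCR31(band));
+ *   mib->rx_mpdu_cnt += cnt;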
+ */
+#define MT_WF_MIB_BASE(_band) __BASE(WF_MIB_BASE, (_band))
+#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
+
+#define MT_MIB_BSCR0(_band) MT_WF_MIB(_band, 0x9cc)
+#define MT_MIB_BSCR1(_band) MT_WF_MIB(_band, 0x9d0)
+#define MT_MIB_BSCR2(_band) MT_WF_MIB(_band, 0x9d4)
+#define MT_MIB_BSCR3(_band) MT_WF_MIB(_band, 0x9d8)
+#define MT_MIB_BSCR4(_band) MT_WF_MIB(_band, 0x9dc)
+#define MT_MIB_BSCR5(_band) MT_WF_MIB(_band, 0x9e0)
+#define MT_MIB_BSCR6(_band) MT_WF_MIB(_band, 0x9e4)
+#define MT_MIB_BSCR7(_band) MT_WF_MIB(_band, 0x9e8)
+#define MT_MIB_BSCR17(_band) MT_WF_MIB(_band, 0xa10)
+
+#define MT_MIB_TSCR5(_band) MT_WF_MIB(_band, 0x6c4)
+#define MT_MIB_TSCR6(_band) MT_WF_MIB(_band, 0x6c8)
+#define MT_MIB_TSCR7(_band) MT_WF_MIB(_band, 0x6d0)
+
+#define MT_MIB_RSCR1(_band) MT_WF_MIB(_band, 0x7ac)
+/* rx mpdu counter, full 32 bits */
+#define MT_MIB_RSCR31(_band) MT_WF_MIB(_band, 0x964)
+#define MT_MIB_RSCR33(_band) MT_WF_MIB(_band, 0x96c)
+
+#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
+#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_RVSR0(_band) MT_WF_MIB(_band, 0x720)
+
+#define MT_MIB_RSCR35(_band) MT_WF_MIB(_band, 0x974)
+#define MT_MIB_RSCR36(_band) MT_WF_MIB(_band, 0x978)
+
+/* tx ampdu cnt, full 32 bits */
+#define MT_MIB_TSCR0(_band) MT_WF_MIB(_band, 0x6b0)
+#define MT_MIB_TSCR2(_band) MT_WF_MIB(_band, 0x6b8)
+
+/* counts all mpdus in ampdu, regardless of success */
+#define MT_MIB_TSCR3(_band) MT_WF_MIB(_band, 0x6bc)
+
+/* counts all successfully tx'd mpdus in ampdu */
+#define MT_MIB_TSCR4(_band) MT_WF_MIB(_band, 0x6c0)
+
+/* rx ampdu count, 32-bit */
+#define MT_MIB_RSCR27(_band) MT_WF_MIB(_band, 0x954)
+
+/* rx ampdu bytes count, 32-bit */
+#define MT_MIB_RSCR28(_band) MT_WF_MIB(_band, 0x958)
+
+/* rx ampdu valid subframe count */
+#define MT_MIB_RSCR29(_band) MT_WF_MIB(_band, 0x95c)
+
+/* rx ampdu valid subframe bytes count, 32 bits */
+#define MT_MIB_RSCR30(_band) MT_WF_MIB(_band, 0x960)
+
+/* remaining windows protected stats */
+#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x080)
+#define MT_MIB_SDR27_TX_RWP_FAIL_CNT GENMASK(15, 0)
+
+#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x084)
+#define MT_MIB_SDR28_TX_RWP_NEED_CNT GENMASK(15, 0)
+
+#define MT_MIB_RVSR1(_band) MT_WF_MIB(_band, 0x724)
+
+/* rx blockack count, 32 bits */
+#define MT_MIB_TSCR1(_band) MT_WF_MIB(_band, 0x6b4)
+
+#define MT_MIB_BTSCR0(_band) MT_WF_MIB(_band, 0x5e0)
+#define MT_MIB_BTSCR5(_band) MT_WF_MIB(_band, 0x788)
+#define MT_MIB_BTSCR6(_band) MT_WF_MIB(_band, 0x798)
+
+#define MT_MIB_BFTFCR(_band) MT_WF_MIB(_band, 0x5d0)
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0xa28 + ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
+#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 4)) & GENMASK(9, 0))
+
+/* UMIB */
+#define MT_WF_UMIB_BASE 0x820cd000
+#define MT_WF_UMIB(ofs) (MT_WF_UMIB_BASE + (ofs))
+
+#define MT_UMIB_RPDCR(_band) (MT_WF_UMIB(0x594) + (_band) * 0x164)
+
+/* WTBLON TOP */
+#define MT_WTBLON_TOP_BASE 0x820d4000
+#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x370)
+#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(4, 0)
+
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x380)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(11, 0)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(14)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+/* WTBL */
+#define MT_WTBL_BASE 0x820d8000
+#define MT_WTBL_LMAC_ID GENMASK(14, 8)
+#define MT_WTBL_LMAC_DW GENMASK(7, 2)
+#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+
+/* AGG: band 0(0x820e2000), band 1(0x820f2000), band 2(0x830e2000) */
+#define MT_WF_AGG_BASE(_band) __BASE(WF_AGG_BASE, (_band))
+#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
+
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x054)
+#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
+
+/* ARB: band 0(0x820e3000), band 1(0x820f3000), band 2(0x830e3000) */
+#define MT_WF_ARB_BASE(_band) __BASE(WF_ARB_BASE, (_band))
+#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
+
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x000)
+#define MT_ARB_SCR_TX_DISABLE BIT(8)
+#define MT_ARB_SCR_RX_DISABLE BIT(9)
+
+/* RMAC: band 0(0x820e5000), band 1(0x820f5000), band 2(0x830e5000), */
+#define MT_WF_RMAC_BASE(_band) __BASE(WF_RMAC_BASE, (_band))
+#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
+
+#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_ED_OFFSET GENMASK(20, 16)
+#define MT_WF_RMAC_MIB_OBSS_BACKOFF GENMASK(15, 0)
+
+#define MT_WF_RMAC_MIB_AIRTIME1(_band) MT_WF_RMAC(_band, 0x0384)
+#define MT_WF_RMAC_MIB_NONQOSD_BACKOFF GENMASK(31, 16)
+
+#define MT_WF_RMAC_MIB_AIRTIME3(_band) MT_WF_RMAC(_band, 0x038c)
+#define MT_WF_RMAC_MIB_QOS01_BACKOFF GENMASK(31, 0)
+
+#define MT_WF_RMAC_MIB_AIRTIME4(_band) MT_WF_RMAC(_band, 0x0390)
+#define MT_WF_RMAC_MIB_QOS23_BACKOFF GENMASK(31, 0)
+
+#define MT_WF_RMAC_RSVD0(_band) MT_WF_RMAC(_band, 0x03e0)
+#define MT_WF_RMAC_RSVD0_EIFS_CLR BIT(21)
+
+/* WFDMA0 */
+#define MT_WFDMA0_BASE 0xd4000
+#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
+
+#define MT_WFDMA0_RST MT_WFDMA0(0x100)
+#define MT_WFDMA0_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_WFDMA0_RX_INT_PCIE_SEL MT_WFDMA0(0x154)
+#define MT_WFDMA0_RX_INT_SEL_RING3 BIT(3)
+
+#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
+#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
+
+#define WF_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
+#define WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD BIT(18)
+#define WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE BIT(14)
+
+#define WF_WFDMA0_GLO_CFG_EXT1 MT_WFDMA0(0x2b4)
+#define WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE BIT(31)
+#define WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE BIT(28)
+
+#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
+#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
+#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
+#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
+
+/* WFDMA1 */
+#define MT_WFDMA1_BASE 0xd5000
+
+/* WFDMA CSR */
+#define MT_WFDMA_EXT_CSR_BASE 0xd7000
+#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
+
+#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
+#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
+#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+
+#define MT_PCIE_RECOG_ID 0xd7090
+#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
+#define MT_PCIE_RECOG_ID_SEM BIT(31)
+
+/* WFDMA0 PCIE1 */
+#define MT_WFDMA0_PCIE1_BASE 0xd8000
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+
+/* WFDMA COMMON */
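+/* MCU, RX and TX queue indices share the q_id[] table: MCU entries come
+ * first, followed by RX entries, then TX entries.
+ */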
+#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
+#define __TXQ(q) (__RXQ(q) + __MT_RXQ_MAX)
+
+#define MT_Q_ID(q) (dev->q_id[(q)])
+#define MT_Q_BASE(q) ((dev->q_wfdma_mask >> (q)) & 0x1 ? \
+ MT_WFDMA1_BASE : MT_WFDMA0_BASE)
+
+#define MT_MCUQ_ID(q) MT_Q_ID(q)
+#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
+#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
+
+#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
+#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
+#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
+
+#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
+ MT_MCUQ_ID(q) * 0x4)
+#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+ MT_RXQ_ID(q) * 0x4)
+#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q) * 0x4)
+
+#define MT_INT_SOURCE_CSR MT_WFDMA0(0x200)
+#define MT_INT_MASK_CSR MT_WFDMA0(0x204)
+
+#define MT_INT1_SOURCE_CSR MT_WFDMA0_PCIE1(0x200)
+#define MT_INT1_MASK_CSR MT_WFDMA0_PCIE1(0x204)
+
+#define MT_INT_RX_DONE_BAND0 BIT(12)
+#define MT_INT_RX_DONE_BAND1 BIT(12)
+#define MT_INT_RX_DONE_BAND2 BIT(13)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
+#define MT_INT_RX_DONE_WA_MAIN BIT(2)
+#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_RX_DONE_WA_TRI BIT(3)
+#define MT_INT_RX_TXFREE_MAIN BIT(17)
+#define MT_INT_RX_TXFREE_TRI BIT(15)
+#define MT_INT_MCU_CMD BIT(29)
+
+#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
+#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
+
+#define MT_INT_RX_DONE_MCU (MT_INT_RX(MT_RXQ_MCU) | \
+ MT_INT_RX(MT_RXQ_MCU_WA))
+
+#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \
+ MT_INT_RX(MT_RXQ_BAND1_WA) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_BAND2_RX_DONE (MT_INT_RX(MT_RXQ_BAND2) | \
+ MT_INT_RX(MT_RXQ_BAND2_WA) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
+ MT_INT_BAND0_RX_DONE | \
+ MT_INT_BAND1_RX_DONE | \
+ MT_INT_BAND2_RX_DONE)
+
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_MCU_WA BIT(22)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
+#define MT_INT_TX_DONE_BAND2 BIT(15)
+
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
+ MT_INT_TX_MCU(MT_MCUQ_WM) | \
+ MT_INT_TX_MCU(MT_MCUQ_FWDL))
+
+#define MT_MCU_CMD MT_WFDMA0(0x1f0)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+
+/* l1/l2 remap */
+#define MT_HIF_REMAP_L1 0x155024
+#define MT_HIF_REMAP_L1_MASK GENMASK(31, 16)
+#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L1 0x130000
+
+#define MT_HIF_REMAP_L2 0x1b4
+#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
+#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
+#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
+#define MT_HIF_REMAP_BASE_L2 0x1000
+
+#define MT_INFRA_BASE 0x18000000
+#define MT_WFSYS0_PHY_START 0x18400000
+#define MT_WFSYS1_PHY_START 0x18800000
+#define MT_WFSYS1_PHY_END 0x18bfffff
+#define MT_CBTOP1_PHY_START 0x70000000
+#define MT_CBTOP1_PHY_END 0x77ffffff
+#define MT_CBTOP2_PHY_START 0xf0000000
+#define MT_CBTOP2_PHY_END 0xffffffff
+#define MT_INFRA_MCU_START 0x7c000000
+#define MT_INFRA_MCU_END 0x7c3fffff
+
+/* FW MODE SYNC */
+#define MT_SWDEF_MODE 0x9143c
+#define MT_SWDEF_NORMAL_MODE 0
+
+/* LED */
+#define MT_LED_TOP_BASE 0x18013000
+#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
+
+#define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4))
+#define MT_LED_CTRL_KICK BIT(7)
+#define MT_LED_CTRL_BLINK_MODE BIT(2)
+#define MT_LED_CTRL_POLARITY BIT(1)
+
+#define MT_LED_TX_BLINK(_n) MT_LED_PHYS(0x10 + ((_n) * 4))
+#define MT_LED_TX_BLINK_ON_MASK GENMASK(7, 0)
+#define MT_LED_TX_BLINK_OFF_MASK GENMASK(15, 8)
+
+#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+
+#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
+#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
+#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+
+/* MT TOP */
+#define MT_TOP_BASE 0xe0000
+#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
+
+#define MT_TOP_LPCR_HOST_BAND(_band) MT_TOP(0x10 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
+#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_TOP_LPCR_HOST_FW_OWN_STAT BIT(2)
+
+#define MT_TOP_LPCR_HOST_BAND_IRQ_STAT(_band) MT_TOP(0x14 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_BAND_STAT BIT(0)
+
+#define MT_TOP_MISC MT_TOP(0xf0)
+#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+
+#define MT_HW_REV 0x70010204
+#define MT_WF_SUBSYS_RST 0x70002600
+
+/* PCIE MAC */
+#define MT_PCIE_MAC_BASE 0x74030000
+#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
+#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+
+#define MT_PCIE1_MAC_BASE 0x74090000
+#define MT_PCIE1_MAC(ofs) (MT_PCIE1_MAC_BASE + (ofs))
+
+#define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188)
+
+/* PHYRX CTRL */
+#define MT_WF_PHYRX_BAND_BASE 0x83080000
+#define MT_WF_PHYRX_BAND(_band, ofs) (MT_WF_PHYRX_BAND_BASE + \
+ ((_band) << 20) + (ofs))
+
+#define MT_WF_PHYRX_BAND_RX_CTRL1(_band) MT_WF_PHYRX_BAND(_band, 0x2004)
+#define MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN GENMASK(2, 0)
+#define MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN GENMASK(11, 9)
+
+/* PHYRX CSD */
+#define MT_WF_PHYRX_CSD_BASE 0x83000000
+#define MT_WF_PHYRX_CSD(_band, _wf, ofs) (MT_WF_PHYRX_CSD_BASE + \
+ ((_band) << 20) + \
+ ((_wf) << 16) + (ofs))
+#define MT_WF_PHYRX_CSD_IRPI(_band, _wf) MT_WF_PHYRX_CSD(_band, _wf, 0x1000)
+
+/* PHYRX CSD BAND */
+#define MT_WF_PHYRX_CSD_BAND_RXTD12(_band) MT_WF_PHYRX_BAND(_band, 0x8230)
+#define MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
+#define MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR BIT(29)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 0ec308f99af5..228bc7d45011 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -395,7 +395,7 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
if (!e || !e->skb)
break;
- dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL);
e->skb = NULL;
nframes++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 6c054850363f..24568b98ed9d 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -756,6 +756,23 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
}
EXPORT_SYMBOL_GPL(mt76_token_consume);
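+/* Stash an rx buffer in the rx_token idr and return its token id, or a
+ * negative errno from idr_alloc() on failure.
+ */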
+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+ struct mt76_txwi_cache *t, dma_addr_t phys)
+{
+ int token;
+
+ spin_lock_bh(&dev->rx_token_lock);
+ token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+ GFP_ATOMIC);
+ spin_unlock_bh(&dev->rx_token_lock);
+
+ t->ptr = ptr;
+ t->dma_addr = phys;
+
+ return token;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
+
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
@@ -784,3 +801,16 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);
+
+struct mt76_txwi_cache *
+mt76_rx_token_release(struct mt76_dev *dev, int token)
+{
+ struct mt76_txwi_cache *t;
+
+ spin_lock_bh(&dev->rx_token_lock);
+ t = idr_remove(&dev->rx_token, token);
+ spin_unlock_bh(&dev->rx_token_lock);
+
+ return t;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_token_release);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 4c4033bb1bb3..3e281715fcd4 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -547,7 +547,7 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
len -= data_len;
nsgs++;
}
- dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
return nsgs;
}
@@ -766,6 +766,9 @@ static void mt76u_status_worker(struct mt76_worker *w)
struct mt76_queue *q;
int i;
+ if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+ return;
+
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->phy.q_tx[i];
if (!q)
@@ -785,11 +788,11 @@ static void mt76u_status_worker(struct mt76_worker *w)
wake_up(&dev->tx_wait);
mt76_worker_schedule(&dev->tx_worker);
-
- if (dev->drv->tx_status_data &&
- !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
- queue_work(dev->wq, &dev->usb.stat_work);
}
+
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->wq, &dev->usb.stat_work);
}
static void mt76u_tx_status_data(struct work_struct *work)
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index 49c52d781f40..260965dde94c 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -29,12 +29,6 @@ enum {
int mt76_wcid_alloc(u32 *mask, int size);
-static inline bool
-mt76_wcid_mask_test(u32 *mask, int idx)
-{
- return mask[idx / 32] & BIT(idx % 32);
-}
-
static inline void
mt76_wcid_mask_set(u32 *mask, int idx)
{
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 6c9c7a61c5c9..c8d332456a6b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -406,6 +406,7 @@ out:
const struct ieee80211_ops mt7601u_ops = {
.tx = mt7601u_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mt7601u_start,
.stop = mt7601u_stop,
.add_interface = mt7601u_add_interface,
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 7390f94cd4ca..a05bda7b9a3b 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) },
{ },
};
+MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids);
#define WILC_SDIO_BLOCK_SIZE 512
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index d3cdffbded69..94ee831b5de3 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -686,6 +686,7 @@ static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
static const struct ieee80211_ops plfxlc_ops = {
.tx = plfxlc_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = plfxlc_op_start,
.stop = plfxlc_op_stop,
.add_interface = plfxlc_op_add_interface,
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 39e54b3787d6..76d0a778636a 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -247,6 +247,7 @@ error:
for (i = 0; i < RX_URBS_COUNT; i++)
free_rx_urb(urbs[i]);
}
+ kfree(urbs);
return r;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index ddfc16de1b26..13dd672b825e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1706,6 +1706,7 @@ static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw)
static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index cd6371e25062..ecddda4c471e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -2004,6 +2004,7 @@ static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index 4f3b0e6c6256..13fdcff0ad66 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -1795,6 +1795,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index 1fde0e767ce3..dcb56f708a5f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -288,6 +288,7 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index bbfe1425c0ee..7118d4f9038d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -133,6 +133,7 @@ static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index a3ffd1b0c9bc..b2a8e75a901b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -630,6 +630,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 81db7f57c7e4..483723bf514b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2873,6 +2873,7 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static const struct ieee80211_ops rt61pci_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 861035444374..dfa9d5213898 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2292,6 +2292,7 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static const struct ieee80211_ops rt73usb_mac80211_ops = {
.tx = rt2x00mac_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index cdfe08078c57..f6c25a52b69a 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1608,6 +1608,7 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8180_start,
.stop = rtl8180_stop,
.add_interface = rtl8180_add_interface,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index c0f6e9c6d03e..04945f905d6d 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1378,6 +1378,7 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev,
static const struct ieee80211_ops rtl8187_ops = {
.tx = rtl8187_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8187_start,
.stop = rtl8187_stop,
.add_interface = rtl8187_add_interface,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
index a263507a77a6..631d078278be 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -3,13 +3,14 @@
# RTL8XXXU Wireless LAN device configuration
#
config RTL8XXXU
- tristate "RTL8723AU/RTL8188[CR]U/RTL819[12]CU (mac80211) support"
+ tristate "Realtek 802.11n USB wireless chips support"
depends on MAC80211 && USB
help
This is an alternative driver for various Realtek RTL8XXX
parts written to utilize the Linux mac80211 stack.
The driver is known to work with a number of RTL8723AU,
- RL8188CU, RTL8188RU, RTL8191CU, and RTL8192CU devices
+ RTL8188CU, RTL8188RU, RTL8191CU, RTL8192CU, RTL8723BU, RTL8192EU,
+ and RTL8188FU devices.
This driver is under development and has a limited feature
set. In particular it does not yet support 40MHz channels
@@ -22,7 +23,7 @@ config RTL8XXXU
but you will need to control which module you wish to load.
To compile this driver as a module, choose M here: the module will
- be called r8xxxu. If unsure, say N.
+ be called rtl8xxxu. If unsure, say N.
config RTL8XXXU_UNTESTED
bool "Include support for untested Realtek 8xxx USB devices (EXPERIMENTAL)"
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
index b278f8697cc0..c4ad5325f5e7 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \
- rtl8xxxu_8723a.o rtl8xxxu_8192c.o
+ rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 782b089a2e1b..136992f0200c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -35,6 +35,7 @@
#define REALTEK_USB_CMD_IDX 0x00
#define TX_TOTAL_PAGE_NUM 0xf8
+#define TX_TOTAL_PAGE_NUM_8188F 0xf7
#define TX_TOTAL_PAGE_NUM_8192E 0xf3
#define TX_TOTAL_PAGE_NUM_8723B 0xf7
/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
@@ -43,6 +44,11 @@
#define TX_PAGE_NUM_LO_PQ 0x02
#define TX_PAGE_NUM_NORM_PQ 0x02
+#define TX_PAGE_NUM_PUBQ_8188F 0xe5
+#define TX_PAGE_NUM_HI_PQ_8188F 0x0c
+#define TX_PAGE_NUM_LO_PQ_8188F 0x02
+#define TX_PAGE_NUM_NORM_PQ_8188F 0x02
+
#define TX_PAGE_NUM_PUBQ_8192E 0xe7
#define TX_PAGE_NUM_HI_PQ_8192E 0x08
#define TX_PAGE_NUM_LO_PQ_8192E 0x0c
@@ -564,7 +570,7 @@ struct rtl8723au_phy_stats {
u8 cck_rpt_b_ofdm_cfosho_b;
u8 reserved_1;
u8 noise_power_db_msb;
- u8 path_cfotail[RTL8723A_MAX_RF_PATHS];
+ s8 path_cfotail[RTL8723A_MAX_RF_PATHS];
u8 pcts_mask[RTL8723A_MAX_RF_PATHS];
s8 stream_rxevm[RTL8723A_MAX_RF_PATHS];
u8 path_rxsnr[RTL8723A_MAX_RF_PATHS];
@@ -859,6 +865,50 @@ struct rtl8192eu_efuse {
u8 res12[0xc3];
};
+struct rtl8188fu_efuse_tx_power {
+ u8 cck_base[6];
+ u8 ht40_base[5];
+ /* a: ofdm; b: ht20 */
+ struct rtl8723au_idx ht20_ofdm_1s_diff;
+};
+
+struct rtl8188fu_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+ struct rtl8188fu_efuse_tx_power tx_power_index_A; /* 0x10 */
+ u8 res1[0x9c]; /* 0x1c */
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 res2[5];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 res3[2];
+ u8 kfree_thermal_k_on;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code;
+ u8 res4[4];
+ u8 vid; /* 0xd0 */
+ u8 res5[1];
+ u8 pid; /* 0xd2 */
+ u8 res6[1];
+ u8 usb_optional_function;
+ u8 res7[2];
+ u8 mac_addr[ETH_ALEN]; /* 0xd7 */
+ u8 res8[2];
+ u8 vendor_name[7];
+ u8 res9[2];
+ u8 device_name[7]; /* 0xe8 */
+ u8 res10[0x41];
+ u8 unknown[0x0d]; /* 0x130 */
+ u8 res11[0xc3];
+};
+
struct rtl8xxxu_reg8val {
u16 reg;
u8 val;
@@ -1273,6 +1323,19 @@ struct rtl8xxxu_ra_report {
u8 desc_rate;
};
+#define CFO_TH_XTAL_HIGH 20 /* kHz */
+#define CFO_TH_XTAL_LOW 10 /* kHz */
+#define CFO_TH_ATC 80 /* kHz */
+
+struct rtl8xxxu_cfo_tracking {
+ bool adjust;
+ bool atc_status;
+ int cfo_tail[2];
+ u8 crystal_cap;
+ u32 packet_count;
+ u32 packet_count_pre;
+};
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1331,9 +1394,9 @@ struct rtl8xxxu_priv {
u32 ep_tx_high_queue:1;
u32 ep_tx_normal_queue:1;
u32 ep_tx_low_queue:1;
- u32 has_xtalk:1;
u32 rx_buf_aggregation:1;
- u8 xtalk;
+ u32 cck_agc_report_type:1;
+ u8 default_crystal_cap;
unsigned int pipe_interrupt;
unsigned int pipe_in;
unsigned int pipe_out[TXDESC_QUEUE_MAX];
@@ -1368,6 +1431,7 @@ struct rtl8xxxu_priv {
struct rtl8723bu_efuse efuse8723bu;
struct rtl8192cu_efuse efuse8192;
struct rtl8192eu_efuse efuse8192eu;
+ struct rtl8188fu_efuse efuse8188fu;
} efuse_wifi;
u32 adda_backup[RTL8XXXU_ADDA_REGS];
u32 mac_backup[RTL8XXXU_MAC_REGS];
@@ -1390,6 +1454,7 @@ struct rtl8xxxu_priv {
struct sk_buff_head c2hcmd_queue;
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
+ struct rtl8xxxu_cfo_tracking cfo_tracking;
};
struct rtl8xxxu_rx_urb {
@@ -1405,6 +1470,7 @@ struct rtl8xxxu_tx_urb {
};
struct rtl8xxxu_fileops {
+ int (*identify_chip) (struct rtl8xxxu_priv *priv);
int (*parse_efuse) (struct rtl8xxxu_priv *priv);
int (*load_firmware) (struct rtl8xxxu_priv *priv);
int (*power_on) (struct rtl8xxxu_priv *priv);
@@ -1414,11 +1480,13 @@ struct rtl8xxxu_fileops {
void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
+ void (*phy_lc_calibrate) (struct rtl8xxxu_priv *priv);
void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv);
void (*config_channel) (struct ieee80211_hw *hw);
int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb);
void (*init_aggregation) (struct rtl8xxxu_priv *priv);
void (*init_statistics) (struct rtl8xxxu_priv *priv);
+ void (*init_burst) (struct rtl8xxxu_priv *priv);
void (*enable_rf) (struct rtl8xxxu_priv *priv);
void (*disable_rf) (struct rtl8xxxu_priv *priv);
void (*usb_quirks) (struct rtl8xxxu_priv *priv);
@@ -1433,6 +1501,8 @@ struct rtl8xxxu_fileops {
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
+ void (*set_crystal_cap) (struct rtl8xxxu_priv *priv, u8 crystal_cap);
+ s8 (*cck_rssi) (struct rtl8xxxu_priv *priv, u8 cck_agc_rpt);
int writeN_block_size;
int rx_agg_buf_size;
char tx_desc_size;
@@ -1448,7 +1518,7 @@ struct rtl8xxxu_fileops {
u16 trxff_boundary;
u8 pbp_rx;
u8 pbp_tx;
- struct rtl8xxxu_reg8val *mactable;
+ const struct rtl8xxxu_reg8val *mactable;
u8 total_page_num;
u8 page_num_hi;
u8 page_num_lo;
@@ -1457,7 +1527,7 @@ struct rtl8xxxu_fileops {
extern int rtl8xxxu_debug;
-extern struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[];
+extern const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[];
extern const u32 rtl8xxxu_iqk_phy_iq_bb_reg[];
u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr);
u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr);
@@ -1486,16 +1556,22 @@ void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv, bool iqk_ok,
void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, bool iqk_ok,
int result[][8], int candidate, bool tx_only);
int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_rfregval *table,
+ const struct rtl8xxxu_rfregval *table,
enum rtl8xxxu_rfpath path);
int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_reg32val *array);
+ const struct rtl8xxxu_reg32val *array);
int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name);
void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_identify_vendor_1bit(struct rtl8xxxu_priv *priv, u32 vendor);
+void rtl8xxxu_identify_vendor_2bits(struct rtl8xxxu_priv *priv, u32 vendor);
+void rtl8xxxu_config_endpoints_sie(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_config_endpoints_no_sie(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data);
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv);
int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start);
+void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv);
int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv);
int rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv,
struct h2c_cmd *h2c, int len);
@@ -1522,6 +1598,7 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_init_burst(struct rtl8xxxu_priv *priv);
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
@@ -1539,7 +1616,11 @@ void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
u32 rts_rate);
void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5);
+void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv);
+void rtl8723a_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap);
+s8 rtl8723a_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt);
+extern struct rtl8xxxu_fileops rtl8188fu_fops;
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
extern struct rtl8xxxu_fileops rtl8723au_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
new file mode 100644
index 000000000000..2c4f403ba68f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -0,0 +1,1766 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RTL8XXXU mac80211 USB driver - 8188f specific subdriver
+ *
+ * Copyright (c) 2022 Bitterblue Smith <rtl8821cerfe2@gmail.com>
+ *
+ * Portions copied from existing rtl8xxxu code:
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+static const struct rtl8xxxu_reg8val rtl8188f_mac_init_table[] = {
+ {0x024, 0xDF}, {0x025, 0x07}, {0x02B, 0x1C}, {0x283, 0x20},
+ {0x421, 0x0F}, {0x428, 0x0A}, {0x429, 0x10}, {0x430, 0x00},
+ {0x431, 0x00}, {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04},
+ {0x435, 0x05}, {0x436, 0x07}, {0x437, 0x08}, {0x43C, 0x04},
+ {0x43D, 0x05}, {0x43E, 0x07}, {0x43F, 0x08}, {0x440, 0x5D},
+ {0x441, 0x01}, {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00},
+ {0x446, 0x00}, {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xF0},
+ {0x44A, 0x0F}, {0x44B, 0x3E}, {0x44C, 0x10}, {0x44D, 0x00},
+ {0x44E, 0x00}, {0x44F, 0x00}, {0x450, 0x00}, {0x451, 0xF0},
+ {0x452, 0x0F}, {0x453, 0x00}, {0x456, 0x5E}, {0x460, 0x44},
+ {0x461, 0x44}, {0x4BC, 0xC0}, {0x4C8, 0xFF}, {0x4C9, 0x08},
+ {0x4CC, 0xFF}, {0x4CD, 0xFF}, {0x4CE, 0x01}, {0x500, 0x26},
+ {0x501, 0xA2}, {0x502, 0x2F}, {0x503, 0x00}, {0x504, 0x28},
+ {0x505, 0xA3}, {0x506, 0x5E}, {0x507, 0x00}, {0x508, 0x2B},
+ {0x509, 0xA4}, {0x50A, 0x5E}, {0x50B, 0x00}, {0x50C, 0x4F},
+ {0x50D, 0xA4}, {0x50E, 0x00}, {0x50F, 0x00}, {0x512, 0x1C},
+ {0x514, 0x0A}, {0x516, 0x0A}, {0x525, 0x4F}, {0x550, 0x10},
+ {0x551, 0x10}, {0x559, 0x02}, {0x55C, 0x28}, {0x55D, 0xFF},
+ {0x605, 0x30}, {0x608, 0x0E}, {0x609, 0x2A}, {0x620, 0xFF},
+ {0x621, 0xFF}, {0x622, 0xFF}, {0x623, 0xFF}, {0x624, 0xFF},
+ {0x625, 0xFF}, {0x626, 0xFF}, {0x627, 0xFF}, {0x638, 0x28},
+ {0x63C, 0x0A}, {0x63D, 0x0A}, {0x63E, 0x0E}, {0x63F, 0x0E},
+ {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xC8},
+ {0x66E, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65},
+ {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70A, 0x65},
+ {0x70B, 0x87},
+ {0xffff, 0xff},
+};
+
+static const struct rtl8xxxu_reg32val rtl8188fu_phy_init_table[] = {
+ {0x800, 0x80045700}, {0x804, 0x00000001},
+ {0x808, 0x0000FC00}, {0x80C, 0x0000000A},
+ {0x810, 0x10001331}, {0x814, 0x020C3D10},
+ {0x818, 0x00200385}, {0x81C, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00390204},
+ {0x828, 0x00000000}, {0x82C, 0x00000000},
+ {0x830, 0x00000000}, {0x834, 0x00000000},
+ {0x838, 0x00000000}, {0x83C, 0x00000000},
+ {0x840, 0x00010000}, {0x844, 0x00000000},
+ {0x848, 0x00000000}, {0x84C, 0x00000000},
+ {0x850, 0x00030000}, {0x854, 0x00000000},
+ {0x858, 0x569A569A}, {0x85C, 0x569A569A},
+ {0x860, 0x00000130}, {0x864, 0x00000000},
+ {0x868, 0x00000000}, {0x86C, 0x27272700},
+ {0x870, 0x00000000}, {0x874, 0x25004000},
+ {0x878, 0x00000808}, {0x87C, 0x004F0201},
+ {0x880, 0xB0000B1E}, {0x884, 0x00000007},
+ {0x888, 0x00000000}, {0x88C, 0xCCC000C0},
+ {0x890, 0x00000800}, {0x894, 0xFFFFFFFE},
+ {0x898, 0x40302010}, {0x89C, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90C, 0x81121111},
+ {0x910, 0x00000002}, {0x914, 0x00000201},
+ {0x948, 0x99000000}, {0x94C, 0x00000010},
+ {0x950, 0x20003000}, {0x954, 0x4A880000},
+ {0x958, 0x4BC5D87A}, {0x95C, 0x04EB9B79},
+ {0x96C, 0x00000003}, {0xA00, 0x00D047C8},
+ {0xA04, 0x80FF800C}, {0xA08, 0x8C898300},
+ {0xA0C, 0x2E7F120F}, {0xA10, 0x9500BB78},
+ {0xA14, 0x1114D028}, {0xA18, 0x00881117},
+ {0xA1C, 0x89140F00}, {0xA20, 0xD1D80000},
+ {0xA24, 0x5A7DA0BD}, {0xA28, 0x0000223B},
+ {0xA2C, 0x00D30000}, {0xA70, 0x101FBF00},
+ {0xA74, 0x00000007}, {0xA78, 0x00000900},
+ {0xA7C, 0x225B0606}, {0xA80, 0x218075B1},
+ {0xA84, 0x00120000}, {0xA88, 0x040C0000},
+ {0xA8C, 0x12345678}, {0xA90, 0xABCDEF00},
+ {0xA94, 0x001B1B89}, {0xA98, 0x05100000},
+ {0xA9C, 0x3F000000}, {0xAA0, 0x00000000},
+ {0xB2C, 0x00000000}, {0xC00, 0x48071D40},
+ {0xC04, 0x03A05611}, {0xC08, 0x000000E4},
+ {0xC0C, 0x6C6C6C6C}, {0xC10, 0x18800000},
+ {0xC14, 0x40000100}, {0xC18, 0x08800000},
+ {0xC1C, 0x40000100}, {0xC20, 0x00000000},
+ {0xC24, 0x00000000}, {0xC28, 0x00000000},
+ {0xC2C, 0x00000000}, {0xC30, 0x69E9CC4A},
+ {0xC34, 0x31000040}, {0xC38, 0x21688080},
+ {0xC3C, 0x00001714}, {0xC40, 0x1F78403F},
+ {0xC44, 0x00010036}, {0xC48, 0xEC020107},
+ {0xC4C, 0x007F037F}, {0xC50, 0x69553420},
+ {0xC54, 0x43BC0094}, {0xC58, 0x00013169},
+ {0xC5C, 0x00250492}, {0xC60, 0x00000000},
+ {0xC64, 0x7112848B}, {0xC68, 0x47C07BFF},
+ {0xC6C, 0x00000036}, {0xC70, 0x2C7F000D},
+ {0xC74, 0x020600DB}, {0xC78, 0x0000001F},
+ {0xC7C, 0x00B91612}, {0xC80, 0x390000E4},
+ {0xC84, 0x11F60000},
+ {0xC88, 0x40000100}, {0xC8C, 0x20200000},
+ {0xC90, 0x00091521}, {0xC94, 0x00000000},
+ {0xC98, 0x00121820}, {0xC9C, 0x00007F7F},
+ {0xCA0, 0x00000000}, {0xCA4, 0x000300A0},
+ {0xCA8, 0x00000000}, {0xCAC, 0x00000000},
+ {0xCB0, 0x00000000}, {0xCB4, 0x00000000},
+ {0xCB8, 0x00000000}, {0xCBC, 0x28000000},
+ {0xCC0, 0x00000000}, {0xCC4, 0x00000000},
+ {0xCC8, 0x00000000}, {0xCCC, 0x00000000},
+ {0xCD0, 0x00000000}, {0xCD4, 0x00000000},
+ {0xCD8, 0x64B22427}, {0xCDC, 0x00766932},
+ {0xCE0, 0x00222222}, {0xCE4, 0x10000000},
+ {0xCE8, 0x37644302}, {0xCEC, 0x2F97D40C},
+ {0xD00, 0x04030740}, {0xD04, 0x40020401},
+ {0xD08, 0x0000907F}, {0xD0C, 0x20010201},
+ {0xD10, 0xA0633333}, {0xD14, 0x3333BC53},
+ {0xD18, 0x7A8F5B6F}, {0xD2C, 0xCB979975},
+ {0xD30, 0x00000000}, {0xD34, 0x80608000},
+ {0xD38, 0x98000000}, {0xD3C, 0x40127353},
+ {0xD40, 0x00000000}, {0xD44, 0x00000000},
+ {0xD48, 0x00000000}, {0xD4C, 0x00000000},
+ {0xD50, 0x6437140A}, {0xD54, 0x00000000},
+ {0xD58, 0x00000282}, {0xD5C, 0x30032064},
+ {0xD60, 0x4653DE68}, {0xD64, 0x04518A3C},
+ {0xD68, 0x00002101}, {0xD6C, 0x2A201C16},
+ {0xD70, 0x1812362E}, {0xD74, 0x322C2220},
+ {0xD78, 0x000E3C24}, {0xE00, 0x2D2D2D2D},
+ {0xE04, 0x2D2D2D2D}, {0xE08, 0x0390272D},
+ {0xE10, 0x2D2D2D2D}, {0xE14, 0x2D2D2D2D},
+ {0xE18, 0x2D2D2D2D}, {0xE1C, 0x2D2D2D2D},
+ {0xE28, 0x00000000}, {0xE30, 0x1000DC1F},
+ {0xE34, 0x10008C1F}, {0xE38, 0x02140102},
+ {0xE3C, 0x681604C2}, {0xE40, 0x01007C00},
+ {0xE44, 0x01004800}, {0xE48, 0xFB000000},
+ {0xE4C, 0x000028D1}, {0xE50, 0x1000DC1F},
+ {0xE54, 0x10008C1F}, {0xE58, 0x02140102},
+ {0xE5C, 0x28160D05}, {0xE60, 0x00000008},
+ {0xE60, 0x021400A0}, {0xE64, 0x281600A0},
+ {0xE6C, 0x01C00010}, {0xE70, 0x01C00010},
+ {0xE74, 0x02000010}, {0xE78, 0x02000010},
+ {0xE7C, 0x02000010}, {0xE80, 0x02000010},
+ {0xE84, 0x01C00010}, {0xE88, 0x02000010},
+ {0xE8C, 0x01C00010}, {0xED0, 0x01C00010},
+ {0xED4, 0x01C00010}, {0xED8, 0x01C00010},
+ {0xEDC, 0x00000010}, {0xEE0, 0x00000010},
+ {0xEEC, 0x03C00010}, {0xF14, 0x00000003},
+ {0xF4C, 0x00000000}, {0xF00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
+static const struct rtl8xxxu_reg32val rtl8188f_agc_table[] = {
+ {0xC78, 0xFC000001}, {0xC78, 0xFB010001},
+ {0xC78, 0xFA020001}, {0xC78, 0xF9030001},
+ {0xC78, 0xF8040001}, {0xC78, 0xF7050001},
+ {0xC78, 0xF6060001}, {0xC78, 0xF5070001},
+ {0xC78, 0xF4080001}, {0xC78, 0xF3090001},
+ {0xC78, 0xF20A0001}, {0xC78, 0xF10B0001},
+ {0xC78, 0xF00C0001}, {0xC78, 0xEF0D0001},
+ {0xC78, 0xEE0E0001}, {0xC78, 0xED0F0001},
+ {0xC78, 0xEC100001}, {0xC78, 0xEB110001},
+ {0xC78, 0xEA120001}, {0xC78, 0xE9130001},
+ {0xC78, 0xE8140001}, {0xC78, 0xE7150001},
+ {0xC78, 0xE6160001}, {0xC78, 0xE5170001},
+ {0xC78, 0xE4180001}, {0xC78, 0xE3190001},
+ {0xC78, 0xE21A0001}, {0xC78, 0xE11B0001},
+ {0xC78, 0xE01C0001}, {0xC78, 0xC21D0001},
+ {0xC78, 0xC11E0001}, {0xC78, 0xC01F0001},
+ {0xC78, 0xA5200001}, {0xC78, 0xA4210001},
+ {0xC78, 0xA3220001}, {0xC78, 0xA2230001},
+ {0xC78, 0xA1240001}, {0xC78, 0xA0250001},
+ {0xC78, 0x65260001}, {0xC78, 0x64270001},
+ {0xC78, 0x63280001}, {0xC78, 0x62290001},
+ {0xC78, 0x612A0001}, {0xC78, 0x442B0001},
+ {0xC78, 0x432C0001}, {0xC78, 0x422D0001},
+ {0xC78, 0x412E0001}, {0xC78, 0x402F0001},
+ {0xC78, 0x21300001}, {0xC78, 0x20310001},
+ {0xC78, 0x05320001}, {0xC78, 0x04330001},
+ {0xC78, 0x03340001}, {0xC78, 0x02350001},
+ {0xC78, 0x01360001}, {0xC78, 0x00370001},
+ {0xC78, 0x00380001}, {0xC78, 0x00390001},
+ {0xC78, 0x003A0001}, {0xC78, 0x003B0001},
+ {0xC78, 0x003C0001}, {0xC78, 0x003D0001},
+ {0xC78, 0x003E0001}, {0xC78, 0x003F0001},
+ {0xC50, 0x69553422}, {0xC50, 0x69553420},
+ {0xffff, 0xffffffff}
+};
+
+static const struct rtl8xxxu_rfregval rtl8188fu_radioa_init_table[] = {
+ {0x00, 0x00030000}, {0x08, 0x00008400},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1B, 0x00001C6C},
+ {0x1E, 0x00080009}, {0x1F, 0x00000880},
+ {0x2F, 0x0001A060}, {0x3F, 0x00028000},
+ {0x42, 0x000060C0}, {0x57, 0x000D0000},
+ {0x58, 0x000C0160}, {0x67, 0x00001552},
+ {0x83, 0x00000000}, {0xB0, 0x000FF9F0},
+ {0xB1, 0x00022218}, {0xB2, 0x00034C00},
+ {0xB4, 0x0004484B}, {0xB5, 0x0000112A},
+ {0xB6, 0x0000053E}, {0xB7, 0x00010408},
+ {0xB8, 0x00010200}, {0xB9, 0x00080001},
+ {0xBA, 0x00040001}, {0xBB, 0x00000400},
+ {0xBF, 0x000C0000}, {0xC2, 0x00002400},
+ {0xC3, 0x00000009}, {0xC4, 0x00040C91},
+ {0xC5, 0x00099999}, {0xC6, 0x000000A3},
+ {0xC7, 0x0008F820}, {0xC8, 0x00076C06},
+ {0xC9, 0x00000000}, {0xCA, 0x00080000},
+ {0xDF, 0x00000180}, {0xEF, 0x000001A0},
+ {0x51, 0x000E8333}, {0x52, 0x000FAC2C},
+ {0x53, 0x00000103}, {0x56, 0x000517F0},
+ {0x35, 0x00000099}, {0x35, 0x00000199},
+ {0x35, 0x00000299}, {0x36, 0x00000064},
+ {0x36, 0x00008064}, {0x36, 0x00010064},
+ {0x36, 0x00018064}, {0x18, 0x00000C07},
+ {0x5A, 0x00048000}, {0x19, 0x000739D0},
+ {0x34, 0x0000ADD6}, {0x34, 0x00009DD3},
+ {0x34, 0x00008CF4}, {0x34, 0x00007CF1},
+ {0x34, 0x00006CEE}, {0x34, 0x00005CEB},
+ {0x34, 0x00004CCE}, {0x34, 0x00003CCB},
+ {0x34, 0x00002CC8}, {0x34, 0x00001C4B},
+ {0x34, 0x00000C48},
+ {0x00, 0x00030159}, {0x84, 0x00048000},
+ {0x86, 0x0000002A}, {0x87, 0x00000025},
+ {0x8E, 0x00065540}, {0x8F, 0x00088000},
+ {0xEF, 0x000020A0}, {0x3B, 0x000F0F00},
+ {0x3B, 0x000E0B00}, {0x3B, 0x000D0900},
+ {0x3B, 0x000C0700}, {0x3B, 0x000B0600},
+ {0x3B, 0x000A0400}, {0x3B, 0x00090200},
+ {0x3B, 0x00080000}, {0x3B, 0x0007BF00},
+ {0x3B, 0x00060B00}, {0x3B, 0x0005C900},
+ {0x3B, 0x00040700}, {0x3B, 0x00030600},
+ {0x3B, 0x0002D500}, {0x3B, 0x00010200},
+ {0x3B, 0x0000E000}, {0xEF, 0x000000A0},
+ {0xEF, 0x00000010}, {0x3B, 0x0000C0A8},
+ {0x3B, 0x00010400}, {0xEF, 0x00000000},
+ {0xEF, 0x00080000}, {0x30, 0x00010000},
+ {0x31, 0x0000000F}, {0x32, 0x00007EFE},
+ {0xEF, 0x00000000}, {0x00, 0x00010159},
+ {0x18, 0x0000FC07}, {0xFE, 0x00000000},
+ {0xFE, 0x00000000}, {0x1F, 0x00080003},
+ {0xFE, 0x00000000}, {0xFE, 0x00000000},
+ {0x1E, 0x00000001}, {0x1F, 0x00080000},
+ {0x00, 0x00033D95},
+ {0xff, 0xffffffff}
+};
+
+static const struct rtl8xxxu_rfregval rtl8188fu_cut_b_radioa_init_table[] = {
+ {0x00, 0x00030000}, {0x08, 0x00008400},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1B, 0x00001C6C},
+ {0x1E, 0x00080009}, {0x1F, 0x00000880},
+ {0x2F, 0x0001A060}, {0x3F, 0x00028000},
+ {0x42, 0x000060C0}, {0x57, 0x000D0000},
+ {0x58, 0x000C0160}, {0x67, 0x00001552},
+ {0x83, 0x00000000}, {0xB0, 0x000FF9F0},
+ {0xB1, 0x00022218}, {0xB2, 0x00034C00},
+ {0xB4, 0x0004484B}, {0xB5, 0x0000112A},
+ {0xB6, 0x0000053E}, {0xB7, 0x00010408},
+ {0xB8, 0x00010200}, {0xB9, 0x00080001},
+ {0xBA, 0x00040001}, {0xBB, 0x00000400},
+ {0xBF, 0x000C0000}, {0xC2, 0x00002400},
+ {0xC3, 0x00000009}, {0xC4, 0x00040C91},
+ {0xC5, 0x00099999}, {0xC6, 0x000000A3},
+ {0xC7, 0x0008F820}, {0xC8, 0x00076C06},
+ {0xC9, 0x00000000}, {0xCA, 0x00080000},
+ {0xDF, 0x00000180}, {0xEF, 0x000001A0},
+ {0x51, 0x000E8231}, {0x52, 0x000FAC2C},
+ {0x53, 0x00000141}, {0x56, 0x000517F0},
+ {0x35, 0x00000090}, {0x35, 0x00000190},
+ {0x35, 0x00000290}, {0x36, 0x00001064},
+ {0x36, 0x00009064}, {0x36, 0x00011064},
+ {0x36, 0x00019064}, {0x18, 0x00000C07},
+ {0x5A, 0x00048000}, {0x19, 0x000739D0},
+ {0x34, 0x0000ADD2}, {0x34, 0x00009DD0},
+ {0x34, 0x00008CF3}, {0x34, 0x00007CF0},
+ {0x34, 0x00006CED}, {0x34, 0x00005CD2},
+ {0x34, 0x00004CCF}, {0x34, 0x00003CCC},
+ {0x34, 0x00002CC9}, {0x34, 0x00001C4C},
+ {0x34, 0x00000C49},
+ {0x00, 0x00030159}, {0x84, 0x00048000},
+ {0x86, 0x0000002A}, {0x87, 0x00000025},
+ {0x8E, 0x00065540}, {0x8F, 0x00088000},
+ {0xEF, 0x000020A0}, {0x3B, 0x000F0F00},
+ {0x3B, 0x000E0B00}, {0x3B, 0x000D0900},
+ {0x3B, 0x000C0700}, {0x3B, 0x000B0600},
+ {0x3B, 0x000A0400}, {0x3B, 0x00090200},
+ {0x3B, 0x00080000}, {0x3B, 0x0007BF00},
+ {0x3B, 0x00060B00}, {0x3B, 0x0005C900},
+ {0x3B, 0x00040700}, {0x3B, 0x00030600},
+ {0x3B, 0x0002D500}, {0x3B, 0x00010200},
+ {0x3B, 0x0000E000}, {0xEF, 0x000000A0},
+ {0xEF, 0x00000010}, {0x3B, 0x0000C0A8},
+ {0x3B, 0x00010400}, {0xEF, 0x00000000},
+ {0xEF, 0x00080000}, {0x30, 0x00010000},
+ {0x31, 0x0000000F}, {0x32, 0x00007EFE},
+ {0xEF, 0x00000000}, {0x00, 0x00010159},
+ {0x18, 0x0000FC07}, {0xFE, 0x00000000},
+ {0xFE, 0x00000000}, {0x1F, 0x00080003},
+ {0xFE, 0x00000000}, {0xFE, 0x00000000},
+ {0x1E, 0x00000001}, {0x1F, 0x00080000},
+ {0x00, 0x00033D95},
+ {0xff, 0xffffffff}
+};
+
+static int rtl8188fu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 sys_cfg, vendor;
+ int ret = 0;
+
+ strscpy(priv->chip_name, "8188FU", sizeof(priv->chip_name));
+ priv->rtl_chip = RTL8188F;
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+ priv->has_wifi = 1;
+
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK);
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ vendor = sys_cfg & SYS_CFG_VENDOR_EXT_MASK;
+ rtl8xxxu_identify_vendor_2bits(priv, vendor);
+
+ ret = rtl8xxxu_config_endpoints_no_sie(priv);
+
+out:
+ return ret;
+}
+
+static void rtl8188f_channel_to_group(int channel, int *group, int *cck_group)
+{
+ if (channel < 3)
+ *group = 0;
+ else if (channel < 6)
+ *group = 1;
+ else if (channel < 9)
+ *group = 2;
+ else if (channel < 12)
+ *group = 3;
+ else
+ *group = 4;
+
+ if (channel == 14)
+ *cck_group = 5;
+ else
+ *cck_group = *group;
+}
+
+static void
+rtl8188f_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+ u32 val32, ofdm, mcs;
+ u8 cck, ofdmbase, mcsbase;
+ int group, cck_group;
+
+ rtl8188f_channel_to_group(channel, &group, &cck_group);
+
+ cck = priv->cck_tx_power_index_A[cck_group];
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+ val32 &= 0xffff00ff;
+ val32 |= (cck << 8);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_A[group];
+ ofdmbase += priv->ofdm_tx_power_diff[0].a;
+ ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_A[group];
+ if (ht40)
+ /* This diff is always 0 - not used in 8188FU. */
+ mcsbase += priv->ht40_tx_power_diff[0].a;
+ else
+ mcsbase += priv->ht20_tx_power_diff[0].a;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs);
+}
+
+/* A workaround to eliminate the 2400MHz, 2440MHz, 2480MHz spur of 8188F. */
+static void rtl8188f_spur_calibration(struct rtl8xxxu_priv *priv, u8 channel)
+{
+ static const u32 frequencies[14 + 1] = {
+ [5] = 0xFCCD,
+ [6] = 0xFC4D,
+ [7] = 0xFFCD,
+ [8] = 0xFF4D,
+ [11] = 0xFDCD,
+ [13] = 0xFCCD,
+ [14] = 0xFF9A
+ };
+
+ static const u32 reg_d40[14 + 1] = {
+ [5] = 0x06000000,
+ [6] = 0x00000600,
+ [13] = 0x06000000
+ };
+
+ static const u32 reg_d44[14 + 1] = {
+ [11] = 0x04000000
+ };
+
+ static const u32 reg_d4c[14 + 1] = {
+ [7] = 0x06000000,
+ [8] = 0x00000380,
+ [14] = 0x00180000
+ };
+
+ const u8 threshold = 0x16;
+ bool do_notch, hw_ctrl, sw_ctrl, hw_ctrl_s1 = false, sw_ctrl_s1 = false;
+ u32 val32, initial_gain, reg948;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_D_SYNC_PATH);
+ val32 |= GENMASK(28, 24);
+ rtl8xxxu_write32(priv, REG_OFDM0_RX_D_SYNC_PATH, val32);
+
+ /* enable notch filter */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_D_SYNC_PATH);
+ val32 |= BIT(9);
+ rtl8xxxu_write32(priv, REG_OFDM0_RX_D_SYNC_PATH, val32);
+
+ if (channel <= 14 && frequencies[channel] > 0) {
+ reg948 = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+ hw_ctrl = reg948 & BIT(6);
+ sw_ctrl = !hw_ctrl;
+
+ if (hw_ctrl) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+ val32 &= GENMASK(5, 3);
+ hw_ctrl_s1 = val32 == BIT(3);
+ } else if (sw_ctrl) {
+ sw_ctrl_s1 = !(reg948 & BIT(9));
+ }
+
+ if (hw_ctrl_s1 || sw_ctrl_s1) {
+ initial_gain = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+
+ /* Disable CCK block */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE_CCK;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = initial_gain & ~OFDM0_X_AGC_CORE1_IGI_MASK;
+ val32 |= 0x30;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32);
+
+ /* disable 3-wire */
+ rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccf000c0);
+
+ /* Setup PSD */
+ rtl8xxxu_write32(priv, REG_FPGA0_PSD_FUNC, frequencies[channel]);
+
+ /* Start PSD */
+ rtl8xxxu_write32(priv, REG_FPGA0_PSD_FUNC, 0x400000 | frequencies[channel]);
+
+ msleep(30);
+
+ do_notch = rtl8xxxu_read32(priv, REG_FPGA0_PSD_REPORT) >= threshold;
+
+ /* turn off PSD */
+ rtl8xxxu_write32(priv, REG_FPGA0_PSD_FUNC, frequencies[channel]);
+
+ /* enable 3-wire */
+ rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccc000c0);
+
+ /* Enable CCK block */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= FPGA_RF_MODE_CCK;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, initial_gain);
+
+ if (do_notch) {
+ rtl8xxxu_write32(priv, REG_OFDM1_CSI_FIX_MASK1, reg_d40[channel]);
+ rtl8xxxu_write32(priv, REG_OFDM1_CSI_FIX_MASK2, reg_d44[channel]);
+ rtl8xxxu_write32(priv, 0xd48, 0x0);
+ rtl8xxxu_write32(priv, 0xd4c, reg_d4c[channel]);
+
+ /* enable CSI mask */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_CFO_TRACKING);
+ val32 |= BIT(28);
+ rtl8xxxu_write32(priv, REG_OFDM1_CFO_TRACKING, val32);
+
+ return;
+ }
+ }
+ }
+
+ /* disable CSI mask function */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_CFO_TRACKING);
+ val32 &= ~BIT(28);
+ rtl8xxxu_write32(priv, REG_OFDM1_CFO_TRACKING, val32);
+}
+
+static void rtl8188fu_config_channel(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ u32 val32;
+ u8 channel, subchannel;
+ bool sec_ch_above;
+
+ channel = (u8)hw->conf.chandef.chan->hw_value;
+
+ /* Set channel */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG);
+ val32 &= ~MODE_AG_CHANNEL_MASK;
+ val32 |= channel;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32);
+
+ /* Spur calibration */
+ rtl8188f_spur_calibration(priv, channel);
+
+ /* Set bandwidth mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ val32 |= hw->conf.chandef.width == NL80211_CHAN_WIDTH_40;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ val32 |= hw->conf.chandef.width == NL80211_CHAN_WIDTH_40;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+ /* RXADC CLK */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= GENMASK(10, 8);
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ /* TXDAC CLK */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= BIT(14) | BIT(12);
+ val32 &= ~BIT(13);
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ /* small BW */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT);
+ val32 &= ~GENMASK(31, 30);
+ rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32);
+
+ /* adc buffer clk */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT);
+ val32 &= ~BIT(29);
+ val32 |= BIT(28);
+ rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32);
+
+ /* adc buffer clk */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_AFE);
+ val32 &= ~BIT(29);
+ val32 |= BIT(28);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_AFE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR);
+ val32 &= ~BIT(19);
+ rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR);
+ val32 &= ~GENMASK(23, 20);
+ val32 |= BIT(21);
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 ||
+ hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ val32 |= BIT(20);
+ else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+ val32 |= BIT(22);
+ rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32);
+
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) {
+ if (hw->conf.chandef.center_freq1 >
+ hw->conf.chandef.chan->center_freq) {
+ sec_ch_above = 1;
+ channel += 2;
+ } else {
+ sec_ch_above = 0;
+ channel -= 2;
+ }
+
+ /* Set Control channel to upper or lower. */
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+ val32 &= ~CCK0_SIDEBAND;
+ if (!sec_ch_above)
+ val32 |= CCK0_SIDEBAND;
+ rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_DATA_SUBCHANNEL);
+ val32 &= ~GENMASK(3, 0);
+ if (sec_ch_above)
+ subchannel = 2;
+ else
+ subchannel = 1;
+ val32 |= subchannel;
+ rtl8xxxu_write32(priv, REG_DATA_SUBCHANNEL, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ val32 &= ~RSR_RSC_BANDWIDTH_40M;
+ rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+ }
+
+ /* RF TRX_BW */
+ val32 = channel;
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 ||
+ hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ val32 |= MODE_AG_BW_20MHZ_8723B;
+ else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+ val32 |= MODE_AG_BW_40MHZ_8723B;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32);
+
+ /* FILTER BW&RC Corner (ACPR) */
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 ||
+ hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ val32 = 0x00065;
+ else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+ val32 = 0x00025;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RXG_MIX_SWBW, val32);
+
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 ||
+ hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ val32 = 0x0;
+ else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+ val32 = 0x01000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RX_BB2, val32);
+
+ /* RC Corner */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00140);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RX_G2, 0x01c6c);
+}
+
+static void rtl8188fu_init_aggregation(struct rtl8xxxu_priv *priv)
+{
+ u8 agg_ctrl, rxdma_mode, usb_tx_agg_desc_num = 6;
+ u32 agg_rx, val32;
+
+ /* TX aggregation */
+ val32 = rtl8xxxu_read32(priv, REG_DWBCN0_CTRL_8188F);
+ val32 &= ~(0xf << 4);
+ val32 |= usb_tx_agg_desc_num << 4;
+ rtl8xxxu_write32(priv, REG_DWBCN0_CTRL_8188F, val32);
+ rtl8xxxu_write8(priv, REG_DWBCN1_CTRL_8723B, usb_tx_agg_desc_num << 1);
+
+ /* RX aggregation */
+ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
+ agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+
+ agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH);
+ agg_rx &= ~RXDMA_USB_AGG_ENABLE;
+ agg_rx &= ~0xFF0F; /* reset agg size and timeout */
+
+ rxdma_mode = rtl8xxxu_read8(priv, REG_RXDMA_PRO_8723B);
+ rxdma_mode &= ~BIT(1);
+
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+ rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx);
+ rtl8xxxu_write8(priv, REG_RXDMA_PRO_8723B, rxdma_mode);
+}
+
+static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ /* Time duration for NHM unit: 4us, 0xc350=200ms */
+ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0xc350);
+ rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff);
+ rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff50);
+ rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff);
+
+ /* TH8 */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 |= 0xff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Enable CCK */
+ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B);
+ val32 &= ~(BIT(8) | BIT(9) | BIT(10));
+ val32 |= BIT(8);
+ rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32);
+
+ /* Max power amongst all RX antennas */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC);
+ val32 |= BIT(7);
+ rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
+}
+
+static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+
+ priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
+ priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+
+ priv->default_crystal_cap = efuse->xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.7s\n", efuse->device_name);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8188fu_efuse));
+ for (i = 0; i < sizeof(struct rtl8188fu_efuse); i += 8)
+ dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
+ }
+
+ return 0;
+}
+
+static int rtl8188fu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ fw_name = "rtlwifi/rtl8188fufw.bin";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
+static void rtl8188fu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ /* Enable BB and RF */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /*
+ * Per vendor driver, run power sequence before init of RF
+ */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ usleep_range(10, 20);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780);
+
+ val8 = SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_USBA | SYS_FUNC_USBD;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ rtl8xxxu_init_phy_regs(priv, rtl8188fu_phy_init_table);
+ rtl8xxxu_init_phy_regs(priv, rtl8188f_agc_table);
+}
+
+static int rtl8188fu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ int ret;
+
+ if (priv->chip_cut == 1)
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8188fu_cut_b_radioa_init_table, RF_A);
+ else
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8188fu_radioa_init_table, RF_A);
+
+ return ret;
+}
+
+static void rtl8188f_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+ u32 rf_amode, lstf;
+ int i;
+
+ /* Check continuous TX and Packet TX */
+ lstf = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+
+ if (lstf & OFDM_LSTF_MASK) {
+ /* Disable all continuous TX */
+ val32 = lstf & ~OFDM_LSTF_MASK;
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+ } else {
+ /* Deal with Packet TX case */
+ /* block all queues */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+ }
+
+ /* Read original RF mode Path A */
+ rf_amode = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG);
+
+ /* Start LC calibration */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, rf_amode | 0x08000);
+
+ for (i = 0; i < 100; i++) {
+ if ((rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG) & 0x08000) == 0)
+ break;
+ msleep(10);
+ }
+
+ if (i == 100)
+ dev_warn(&priv->udev->dev, "LC calibration timed out.\n");
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, rf_amode);
+
+ /* Restore original parameters */
+ if (lstf & OFDM_LSTF_MASK)
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, lstf);
+ else /* Deal with Packet TX case */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
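+/* Path A TX LOK/IQK. Returns 0x01 on success and stores the LOK result in *lok_result. */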
+static int rtl8188fu_iqk_path_a(struct rtl8xxxu_priv *priv, u32 *lok_result)
+{
+ u32 reg_eac, reg_e94, reg_e9c, val32;
+ int result = 0;
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0x07ff7);
+
+ /* PA,PAD gain adjust */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x980);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x5102a);
+
+ /* enter IQK mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ff);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(25);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+
+ /* save LOK result */
+ *lok_result = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_TXM_IDAC);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+
+ return result;
+}
+
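+/* Path A RX IQK. Returns 0x01 when the TX step passes, with 0x02 added when the RX step also passes. */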
+static int rtl8188fu_rx_iqk_path_a(struct rtl8xxxu_priv *priv, u32 lok_result)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32;
+ int result = 0;
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf1173);
+
+ /* PA,PAD gain adjust */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x980);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x5102a);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Tx IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x30008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160fff);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(25);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+ val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) |
+ ((reg_e9c & 0x3ff0000) >> 16);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /*
+ * Modify RX IQK mode table
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ff2);
+
+ /*
+ * PA, PAD setting
+ */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x980);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * RX IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x30008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x281613ff);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(25);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+
+ /* reload LOK value */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXM_IDAC, lok_result);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+
+out:
+ return result;
+}
+
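+/* One pass of the path A IQ calibration; the TX/RX IQK readings for pass t are stored in result[t]. */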
+static void rtl8188fu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32, rx_initial_gain, lok_result;
+ u32 path_sel_bb, path_sel_rf;
+ int path_a_ok;
+ int retry = 2;
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+ };
+
+ /*
+ * Note: IQ calibration must be performed after loading
+ * PHY_REG.txt, radio_a.txt and radio_b.txt.
+ */
+
+ rx_initial_gain = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ if (t == 0) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+ priv->pi_enabled = val32 & FPGA0_HSSI_PARM1_PI;
+ }
+
+ /* save RF path */
+ path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+ path_sel_rf = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_S0S1);
+
+ /* BB setting */
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x25204000);
+
+ /* MAC settings */
+ val32 = rtl8xxxu_read32(priv, REG_TX_PTCL_CTRL);
+ val32 |= 0x00ff0000;
+ rtl8xxxu_write32(priv, REG_TX_PTCL_CTRL, val32);
+
+ /* IQ calibration setting */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0xff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8188fu_iqk_path_a(priv, &lok_result);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0xff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8188fu_rx_iqk_path_a(priv, lok_result);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A IQK failed!\n", __func__);
+
+ /* Back to BB mode, load original value */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0xff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (t == 0)
+ return;
+
+ if (!priv->pi_enabled) {
+ /*
+ * Switch back BB to SI mode after finishing
+ * IQ Calibration
+ */
+ val32 = 0x01000000;
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, val32);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, val32);
+ }
+
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Reload RF path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, path_sel_rf);
+
+ /* Restore RX initial gain */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ val32 &= 0xffffff00;
+ val32 |= 0x50;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32);
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ val32 &= 0xffffff00;
+ val32 |= rx_initial_gain & 0xff;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32);
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+}
+
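+/* Run the IQ calibration up to three times and apply the first set of consistent results. */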
+static void rtl8188fu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ s32 reg_tmp = 0;
+ bool simu;
+ u32 path_sel_bb, path_sel_rf;
+
+ /* Save RF path */
+ path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+ path_sel_rf = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_S0S1);
+
+ memset(result, 0, sizeof(result));
+ candidate = -1;
+
+ path_a_ok = false;
+
+ for (i = 0; i < 3; i++) {
+ rtl8188fu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_gen2_simularity_compare(priv, result, 1, 2);
+ if (simu) {
+ candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp)
+ candidate = 3;
+ else
+ candidate = -1;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+ "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n",
+ __func__, reg_e94, reg_e9c,
+ reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ } else {
+ reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+ reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, path_sel_rf);
+}
+
+static void rtl8188f_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* 0x04[12:11] = 2b'01 enable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~((APS_FSMCO_PCIE | APS_FSMCO_HW_SUSPEND) >> 8);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* 0xC4[4] <= 0, don't turn off USB APHY LDO under suspend mode */
+ val8 = rtl8xxxu_read8(priv, 0xc4);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, 0xc4, val8);
+}
+
+static int rtl8188f_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+ /* Disable SW LPS */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(APS_FSMCO_SW_LPS >> 8);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* wait till 0x04[17] = 1 power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Disable HWPDN */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(APS_FSMCO_HW_POWERDOWN >> 8);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* Disable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(APS_FSMCO_HW_SUSPEND >> 8);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* Set APS_FSMCO_MAC_ENABLE, then poll until the hardware clears it */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= APS_FSMCO_MAC_ENABLE >> 8;
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* 0x27<=35 to reduce RF noise */
+ rtl8xxxu_write8(priv, 0x27, 0x35);
+exit:
+ return ret;
+}
+
+static int rtl8188fu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* 0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */
+ val8 = rtl8xxxu_read8(priv, 0x4e);
+ val8 &= ~BIT(7);
+ rtl8xxxu_write8(priv, 0x4e, val8);
+
+ /* 0x27 <= 34, xtal_qsel = 0 to xtal bring up */
+ rtl8xxxu_write8(priv, 0x27, 0x34);
+
+ /* 0x04[9] = 1 turn off MAC by HW state machine */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= APS_FSMCO_MAC_OFF >> 8;
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_OFF) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8188fu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* 0x04[12:11] = 2b'01 enable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~((APS_FSMCO_PCIE | APS_FSMCO_HW_SUSPEND) >> 8);
+ val8 |= APS_FSMCO_HW_SUSPEND >> 8;
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* 0xC4[4] <= 1, turn off USB APHY LDO under suspend mode */
+ val8 = rtl8xxxu_read8(priv, 0xc4);
+ val8 |= BIT(4);
+ rtl8xxxu_write8(priv, 0xc4, val8);
+
+ return 0;
+}
+
+static int rtl8188fu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int retry, retval;
+
+ /* set RPWM IMR */
+ val8 = rtl8xxxu_read8(priv, REG_FTIMR + 1);
+ val8 |= IMR0_CPWM >> 8;
+ rtl8xxxu_write8(priv, REG_FTIMR + 1, val8);
+
+ /* Tx Pause */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ retry = 100;
+ retval = -EBUSY;
+
+ /*
+ * Poll 32 bit wide REG_SCH_TX_CMD for 0x00000000 to ensure no TX is pending.
+ */
+ do {
+ val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD);
+ if (!val32) {
+ retval = 0;
+ break;
+ }
+ } while (retry--);
+
+ if (retval) {
+ dev_warn(dev, "Failed to flush TX queue\n");
+ goto out;
+ }
+
+ /* Disable CCK and OFDM, clock gated */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ udelay(2);
+
+ /* Whole BB is reset */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /* Reset MAC TRX */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= 0x3f;
+ val16 &= ~(CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | CR_SECURITY_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /* Respond TxOK to scheduler */
+ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+ val8 |= DUAL_TSF_TX_OK;
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+out:
+ return retval;
+}
+
+static int rtl8188fu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+ int ret;
+
+ rtl8188f_disabled_to_emu(priv);
+
+ ret = rtl8188f_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ rtl8xxxu_write8(priv, REG_CR, 0);
+
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+exit:
+ return ret;
+}
+
+static void rtl8188fu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ val16 = rtl8xxxu_read16(priv, REG_GPIO_MUXCFG);
+ val16 &= ~BIT(12);
+ rtl8xxxu_write16(priv, REG_GPIO_MUXCFG, val16);
+
+ rtl8xxxu_write32(priv, REG_HISR0, 0xFFFFFFFF);
+ rtl8xxxu_write32(priv, REG_HISR1, 0xFFFFFFFF);
+
+ /* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ rtl8188fu_active_to_lps(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8188fu_active_to_emu(priv);
+ rtl8188fu_emu_to_disabled(priv);
+}
+
+#define PPG_BB_GAIN_2G_TXA_OFFSET_8188F 0xee
+#define PPG_BB_GAIN_2G_TX_OFFSET_MASK 0x0f
+
+static void rtl8188f_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+ u8 pg_pwrtrim = 0xff, val8;
+ s8 bb_gain;
+
+ /* Somehow this is not found in the efuse we read earlier. */
+ rtl8xxxu_read_efuse8(priv, PPG_BB_GAIN_2G_TXA_OFFSET_8188F, &pg_pwrtrim);
+
+ if (pg_pwrtrim != 0xff) {
+ bb_gain = pg_pwrtrim & PPG_BB_GAIN_2G_TX_OFFSET_MASK;
+
+ if (bb_gain == PPG_BB_GAIN_2G_TX_OFFSET_MASK)
+ bb_gain = 0;
+ else if (bb_gain & 1)
+ bb_gain = bb_gain >> 1;
+ else
+ bb_gain = -(bb_gain >> 1);
+
+ val8 = abs(bb_gain);
+ if (bb_gain > 0)
+ val8 |= BIT(5);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55);
+ val32 &= ~0xfc000;
+ val32 |= val8 << 14;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, val32);
+ }
+
+ rtl8xxxu_write8(priv, REG_RF_CTRL, RF_ENABLE | RF_RSTB | RF_SDMRSTB);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+ val32 |= OFDM_RF_PATH_RX_A | OFDM_RF_PATH_TX_A;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static void rtl8188f_disable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~OFDM_RF_PATH_TX_MASK;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ /* Power down RF module */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0);
+}
+
+static void rtl8188f_usb_quirks(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+ u32 val32;
+
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
+ val32 |= TXDMA_OFFSET_DROP_DATA_EN;
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
+}
+
+#define XTAL1 GENMASK(22, 17)
+#define XTAL0 GENMASK(16, 11)
+
+static void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap)
+{
+ struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking;
+ u32 val32;
+
+ if (crystal_cap == cfo->crystal_cap)
+ return;
+
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+
+ dev_dbg(&priv->udev->dev,
+ "%s: Adjusting crystal cap from 0x%x (actually 0x%lx 0x%lx) to 0x%x\n",
+ __func__,
+ cfo->crystal_cap,
+ FIELD_GET(XTAL1, val32),
+ FIELD_GET(XTAL0, val32),
+ crystal_cap);
+
+ val32 &= ~(XTAL1 | XTAL0);
+ val32 |= FIELD_PREP(XTAL1, crystal_cap) |
+ FIELD_PREP(XTAL0, crystal_cap);
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+ cfo->crystal_cap = crystal_cap;
+}
+
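+/* Translate the CCK AGC report (LNA/VGA indices) into an approximate RX power in dBm. */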
+static s8 rtl8188f_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
+{
+ s8 rx_pwr_all = 0x00;
+ u8 vga_idx, lna_idx;
+
+ lna_idx = (cck_agc_rpt & 0xE0) >> 5;
+ vga_idx = cck_agc_rpt & 0x1F;
+
+ switch (lna_idx) {
+ case 7:
+ if (vga_idx <= 27)
+ rx_pwr_all = -100 + 2 * (27 - vga_idx);
+ else
+ rx_pwr_all = -100;
+ break;
+ case 5:
+ rx_pwr_all = -74 + 2 * (21 - vga_idx);
+ break;
+ case 3:
+ rx_pwr_all = -60 + 2 * (20 - vga_idx);
+ break;
+ case 1:
+ rx_pwr_all = -44 + 2 * (19 - vga_idx);
+ break;
+ default:
+ break;
+ }
+
+ return rx_pwr_all;
+}
+
+struct rtl8xxxu_fileops rtl8188fu_fops = {
+ .identify_chip = rtl8188fu_identify_chip,
+ .parse_efuse = rtl8188fu_parse_efuse,
+ .load_firmware = rtl8188fu_load_firmware,
+ .power_on = rtl8188fu_power_on,
+ .power_off = rtl8188fu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_auto_llt_table,
+ .init_phy_bb = rtl8188fu_init_phy_bb,
+ .init_phy_rf = rtl8188fu_init_phy_rf,
+ .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
+ .phy_lc_calibrate = rtl8188f_phy_lc_calibrate,
+ .phy_iq_calibrate = rtl8188fu_phy_iq_calibrate,
+ .config_channel = rtl8188fu_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc24,
+ .init_aggregation = rtl8188fu_init_aggregation,
+ .init_statistics = rtl8188fu_init_statistics,
+ .init_burst = rtl8xxxu_init_burst,
+ .enable_rf = rtl8188f_enable_rf,
+ .disable_rf = rtl8188f_disable_rf,
+ .usb_quirks = rtl8188f_usb_quirks,
+ .set_tx_power = rtl8188f_set_tx_power,
+ .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+ .report_connect = rtl8xxxu_gen2_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v2,
+ .set_crystal_cap = rtl8188f_set_crystal_cap,
+ .cck_rssi = rtl8188f_cck_rssi,
+ .writeN_block_size = 128,
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+ .has_s0s1 = 1,
+ .has_tx_report = 1,
+ .gen2_thermal_meter = 1,
+ .needs_full_init = 1,
+ .adda_1t_init = 0x03c00014,
+ .adda_1t_path_on = 0x03c00014,
+ .trxff_boundary = 0x3f7f,
+ .pbp_rx = PBP_PAGE_SIZE_256,
+ .pbp_tx = PBP_PAGE_SIZE_256,
+ .mactable = rtl8188f_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM_8188F,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ_8188F,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ_8188F,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ_8188F,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index 27c4cb688be4..3bef9ffc8b02 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -77,7 +77,7 @@ static struct rtl8xxxu_power_base rtl8188r_power_base = {
.reg_0868 = 0x00020204,
};
-static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
+static const struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00018c63},
{0x04, 0x000210e7}, {0x09, 0x0002044f},
@@ -152,7 +152,7 @@ static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
{0xff, 0xffffffff}
};
-static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
+static const struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00018c63},
{0x04, 0x000210e7}, {0x09, 0x0002044f},
@@ -176,7 +176,7 @@ static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
{0xff, 0xffffffff}
};
-static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
+static const struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00018c63},
{0x04, 0x000210e7}, {0x09, 0x0002044f},
@@ -251,7 +251,7 @@ static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
{0xff, 0xffffffff}
};
-static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
+static const struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00018c63},
{0x04, 0x000210e7}, {0x09, 0x0002044f},
@@ -326,6 +326,64 @@ static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
{0xff, 0xffffffff}
};
+static int rtl8192cu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 val32, bonding, sys_cfg, vendor;
+ int ret = 0;
+
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK);
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ if (sys_cfg & SYS_CFG_TYPE_ID) {
+ bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+ bonding &= HPON_FSM_BONDING_MASK;
+ if (bonding == HPON_FSM_BONDING_1T2R) {
+ strscpy(priv->chip_name, "8191CU", sizeof(priv->chip_name));
+ priv->tx_paths = 1;
+ priv->usb_interrupts = 1;
+ priv->rtl_chip = RTL8191C;
+ } else {
+ strscpy(priv->chip_name, "8192CU", sizeof(priv->chip_name));
+ priv->tx_paths = 2;
+ priv->usb_interrupts = 0;
+ priv->rtl_chip = RTL8192C;
+ }
+ priv->rf_paths = 2;
+ priv->rx_paths = 2;
+ } else {
+ strscpy(priv->chip_name, "8188CU", sizeof(priv->chip_name));
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+ priv->rtl_chip = RTL8188C;
+ priv->usb_interrupts = 0;
+ }
+ priv->has_wifi = 1;
+
+ vendor = sys_cfg & SYS_CFG_VENDOR_ID;
+ rtl8xxxu_identify_vendor_1bit(priv, vendor);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+ priv->rom_rev = u32_get_bits(val32, GPIO_RF_RL_ID);
+
+ rtl8xxxu_config_endpoints_sie(priv);
+
+ /*
+ * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+ */
+ if (!priv->ep_tx_count)
+ ret = rtl8xxxu_config_endpoints_no_sie(priv);
+
+out:
+ return ret;
+}
+
static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
{
char *fw_name;
@@ -392,7 +450,7 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
priv->power_base = &rtl8192c_power_base;
if (efuse->rf_regulatory & 0x20) {
- sprintf(priv->chip_name, "8188RU");
+ strscpy(priv->chip_name, "8188RU", sizeof(priv->chip_name));
priv->rtl_chip = RTL8188R;
priv->hi_pa = 1;
priv->no_pape = 1;
@@ -413,7 +471,7 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
static int rtl8192cu_init_phy_rf(struct rtl8xxxu_priv *priv)
{
- struct rtl8xxxu_rfregval *rftable;
+ const struct rtl8xxxu_rfregval *rftable;
int ret;
if (priv->rtl_chip == RTL8188R) {
@@ -541,6 +599,7 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
}
struct rtl8xxxu_fileops rtl8192cu_fops = {
+ .identify_chip = rtl8192cu_identify_chip,
.parse_efuse = rtl8192cu_parse_efuse,
.load_firmware = rtl8192cu_load_firmware,
.power_on = rtl8192cu_power_on,
@@ -549,6 +608,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.llt_init = rtl8xxxu_init_llt_table,
.init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
.init_phy_rf = rtl8192cu_init_phy_rf,
+ .phy_lc_calibrate = rtl8723a_phy_lc_calibrate,
.phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
.config_channel = rtl8xxxu_gen1_config_channel,
.parse_rx_desc = rtl8xxxu_parse_rxdesc16,
@@ -560,6 +620,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
+ .cck_rssi = rtl8723a_cck_rssi,
.writeN_block_size = 128,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index b06508d0cdf8..6dc1e5858e77 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -32,7 +32,7 @@
#include "rtl8xxxu.h"
#include "rtl8xxxu_regs.h"
-static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
+static const struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
{0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7},
{0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00},
{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
@@ -62,7 +62,7 @@ static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
{0xffff, 0xff},
};
-static struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = {
+static const struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
{0x810, 0x10001331}, {0x814, 0x020c3d10},
@@ -194,7 +194,7 @@ static struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = {
{0xffff, 0xffffffff},
};
-static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = {
+static const struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = {
{0xc78, 0xfb000001}, {0xc78, 0xfb010001},
{0xc78, 0xfb020001}, {0xc78, 0xfb030001},
{0xc78, 0xfb040001}, {0xc78, 0xfb050001},
@@ -263,7 +263,7 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = {
{0xffff, 0xffffffff}
};
-static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_highpa_table[] = {
+static const struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_highpa_table[] = {
{0xc78, 0xfa000001}, {0xc78, 0xf9010001},
{0xc78, 0xf8020001}, {0xc78, 0xf7030001},
{0xc78, 0xf6040001}, {0xc78, 0xf5050001},
@@ -332,7 +332,7 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_highpa_table[] = {
{0xffff, 0xffffffff}
};
-static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = {
+static const struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = {
{0x7f, 0x00000082}, {0x81, 0x0003fc00},
{0x00, 0x00030000}, {0x08, 0x00008400},
{0x18, 0x00000407}, {0x19, 0x00000012},
@@ -412,7 +412,7 @@ static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = {
{0xff, 0xffffffff}
};
-static struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = {
+static const struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = {
{0x7f, 0x00000082}, {0x81, 0x0003fc00},
{0x00, 0x00030000}, {0x08, 0x00008400},
{0x18, 0x00000407}, {0x19, 0x00000012},
@@ -478,6 +478,53 @@ static struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = {
{0xff, 0xffffffff}
};
+static int rtl8192eu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 val32, bonding, sys_cfg, vendor;
+ int ret = 0;
+
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK);
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+ bonding &= HPON_FSM_BONDING_MASK;
+ if (bonding == HPON_FSM_BONDING_1T2R) {
+ strscpy(priv->chip_name, "8191EU", sizeof(priv->chip_name));
+ priv->tx_paths = 1;
+ priv->rtl_chip = RTL8191E;
+ } else {
+ strscpy(priv->chip_name, "8192EU", sizeof(priv->chip_name));
+ priv->tx_paths = 2;
+ priv->rtl_chip = RTL8192E;
+ }
+ priv->rf_paths = 2;
+ priv->rx_paths = 2;
+ priv->has_wifi = 1;
+
+ vendor = sys_cfg & SYS_CFG_VENDOR_EXT_MASK;
+ rtl8xxxu_identify_vendor_2bits(priv, vendor);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+ priv->rom_rev = u32_get_bits(val32, GPIO_RF_RL_ID);
+
+ rtl8xxxu_config_endpoints_sie(priv);
+
+ /*
+ * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+ */
+ if (!priv->ep_tx_count)
+ ret = rtl8xxxu_config_endpoints_no_sie(priv);
+
+out:
+ return ret;
+}
+
static void
rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
{
@@ -635,8 +682,7 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
}
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
+ priv->default_crystal_cap = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
/*
* device_info section seems to be laid out as records
@@ -1671,7 +1717,30 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
}
+static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
+{
+ static const s8 lna_gain_table_0[8] = {15, 9, -10, -21, -23, -27, -43, -44};
+ static const s8 lna_gain_table_1[8] = {24, 18, 13, -4, -11, -18, -31, -36};
+
+ s8 rx_pwr_all = 0x00;
+ u8 vga_idx, lna_idx;
+ s8 lna_gain = 0;
+
+ lna_idx = (cck_agc_rpt & 0xE0) >> 5;
+ vga_idx = cck_agc_rpt & 0x1F;
+
+ if (priv->cck_agc_report_type == 0)
+ lna_gain = lna_gain_table_0[lna_idx];
+ else
+ lna_gain = lna_gain_table_1[lna_idx];
+
+ rx_pwr_all = lna_gain - (2 * vga_idx);
+
+ return rx_pwr_all;
+}
+
struct rtl8xxxu_fileops rtl8192eu_fops = {
+ .identify_chip = rtl8192eu_identify_chip,
.parse_efuse = rtl8192eu_parse_efuse,
.load_firmware = rtl8192eu_load_firmware,
.power_on = rtl8192eu_power_on,
@@ -1680,6 +1749,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.llt_init = rtl8xxxu_auto_llt_table,
.init_phy_bb = rtl8192eu_init_phy_bb,
.init_phy_rf = rtl8192eu_init_phy_rf,
+ .phy_lc_calibrate = rtl8723a_phy_lc_calibrate,
.phy_iq_calibrate = rtl8192eu_phy_iq_calibrate,
.config_channel = rtl8xxxu_gen2_config_channel,
.parse_rx_desc = rtl8xxxu_parse_rxdesc24,
@@ -1690,6 +1760,8 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
+ .set_crystal_cap = rtl8723a_set_crystal_cap,
+ .cck_rssi = rtl8192e_cck_rssi,
.writeN_block_size = 128,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 4f93f88716a9..707ac48ecc83 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -54,7 +54,7 @@ static struct rtl8xxxu_power_base rtl8723a_power_base = {
.reg_0868 = 0x02040608,
};
-static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
+static const struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00039c63},
{0x04, 0x000210e7}, {0x09, 0x0002044f},
@@ -129,6 +129,55 @@ static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
{0xff, 0xffffffff}
};
+static int rtl8723au_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 val32, sys_cfg, vendor;
+ int ret = 0;
+
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK);
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ strscpy(priv->chip_name, "8723AU", sizeof(priv->chip_name));
+ priv->usb_interrupts = 1;
+ priv->rtl_chip = RTL8723A;
+
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+
+ val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
+ if (val32 & MULTI_WIFI_FUNC_EN)
+ priv->has_wifi = 1;
+ if (val32 & MULTI_BT_FUNC_EN)
+ priv->has_bluetooth = 1;
+ if (val32 & MULTI_GPS_FUNC_EN)
+ priv->has_gps = 1;
+ priv->is_multi_func = 1;
+
+ vendor = sys_cfg & SYS_CFG_VENDOR_ID;
+ rtl8xxxu_identify_vendor_1bit(priv, vendor);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+ priv->rom_rev = u32_get_bits(val32, GPIO_RF_RL_ID);
+
+ rtl8xxxu_config_endpoints_sie(priv);
+
+ /*
+ * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+ */
+ if (!priv->ep_tx_count)
+ ret = rtl8xxxu_config_endpoints_no_sie(priv);
+
+out:
+ return ret;
+}
+
static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8723au_efuse *efuse = &priv->efuse_wifi.efuse8723;
@@ -166,10 +215,10 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
efuse->ht20_max_power_offset,
sizeof(efuse->ht20_max_power_offset));
- if (priv->efuse_wifi.efuse8723.version >= 0x01) {
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
- }
+ if (priv->efuse_wifi.efuse8723.version >= 0x01)
+ priv->default_crystal_cap = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
+ else
+ priv->fops->set_crystal_cap = NULL;
priv->power_base = &rtl8723a_power_base;
@@ -357,7 +406,59 @@ exit:
return ret;
}
+#define XTAL1 GENMASK(23, 18)
+#define XTAL0 GENMASK(17, 12)
+
+void rtl8723a_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap)
+{
+ struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking;
+ u32 val32;
+
+ if (crystal_cap == cfo->crystal_cap)
+ return;
+
+ val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
+
+ dev_dbg(&priv->udev->dev,
+ "%s: Adjusting crystal cap from 0x%x (actually 0x%lx 0x%lx) to 0x%x\n",
+ __func__,
+ cfo->crystal_cap,
+ FIELD_GET(XTAL1, val32),
+ FIELD_GET(XTAL0, val32),
+ crystal_cap);
+
+ val32 &= ~(XTAL1 | XTAL0);
+ val32 |= FIELD_PREP(XTAL1, crystal_cap) |
+ FIELD_PREP(XTAL0, crystal_cap);
+ rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
+
+ cfo->crystal_cap = crystal_cap;
+}
+
+s8 rtl8723a_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
+{
+ s8 rx_pwr_all = 0x00;
+
+ switch (cck_agc_rpt & 0xc0) {
+ case 0xc0:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x80:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x40:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x00:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+
+ return rx_pwr_all;
+}
+
struct rtl8xxxu_fileops rtl8723au_fops = {
+ .identify_chip = rtl8723au_identify_chip,
.parse_efuse = rtl8723au_parse_efuse,
.load_firmware = rtl8723au_load_firmware,
.power_on = rtl8723au_power_on,
@@ -366,6 +467,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.llt_init = rtl8xxxu_init_llt_table,
.init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
.init_phy_rf = rtl8723au_init_phy_rf,
+ .phy_lc_calibrate = rtl8723a_phy_lc_calibrate,
.phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
.config_channel = rtl8xxxu_gen1_config_channel,
.parse_rx_desc = rtl8xxxu_parse_rxdesc16,
@@ -377,6 +479,8 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
+ .set_crystal_cap = rtl8723a_set_crystal_cap,
+ .cck_rssi = rtl8723a_cck_rssi,
.writeN_block_size = 1024,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index a71e1816e632..a0ec895b61a4 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -32,7 +32,7 @@
#include "rtl8xxxu.h"
#include "rtl8xxxu_regs.h"
-static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
+static const struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
{0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
{0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10},
{0x430, 0x00}, {0x431, 0x00},
@@ -63,7 +63,7 @@ static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
{0xffff, 0xff},
};
-static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
+static const struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
{0x810, 0x10001331}, {0x814, 0x020c3d10},
@@ -164,7 +164,7 @@ static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
{0xffff, 0xffffffff},
};
-static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
+static const struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
{0xc78, 0xfd000001}, {0xc78, 0xfc010001},
{0xc78, 0xfb020001}, {0xc78, 0xfa030001},
{0xc78, 0xf9040001}, {0xc78, 0xf8050001},
@@ -235,7 +235,7 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
{0xffff, 0xffffffff}
};
-static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
+static const struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
{0x00, 0x00010000}, {0xb0, 0x000dffe0},
{0xfe, 0x00000000}, {0xfe, 0x00000000},
{0xfe, 0x00000000}, {0xb1, 0x00000018},
@@ -304,6 +304,53 @@ static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
{0xff, 0xffffffff}
};
+static int rtl8723bu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 val32, sys_cfg, vendor;
+ int ret = 0;
+
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK);
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ strscpy(priv->chip_name, "8723BU", sizeof(priv->chip_name));
+ priv->rtl_chip = RTL8723B;
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+
+ val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
+ if (val32 & MULTI_WIFI_FUNC_EN)
+ priv->has_wifi = 1;
+ if (val32 & MULTI_BT_FUNC_EN)
+ priv->has_bluetooth = 1;
+ if (val32 & MULTI_GPS_FUNC_EN)
+ priv->has_gps = 1;
+ priv->is_multi_func = 1;
+
+ vendor = sys_cfg & SYS_CFG_VENDOR_EXT_MASK;
+ rtl8xxxu_identify_vendor_2bits(priv, vendor);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+ priv->rom_rev = u32_get_bits(val32, GPIO_RF_RL_ID);
+
+ rtl8xxxu_config_endpoints_sie(priv);
+
+ /*
+ * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+ */
+ if (!priv->ep_tx_count)
+ ret = rtl8xxxu_config_endpoints_no_sie(priv);
+
+out:
+ return ret;
+}
+
static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data)
{
struct h2c_cmd h2c;
@@ -445,8 +492,7 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
}
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f;
+ priv->default_crystal_cap = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f;
dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
@@ -518,7 +564,7 @@ static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv)
return ret;
}
-static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
+void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
{
u32 val32;
@@ -1640,7 +1686,36 @@ static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
}
+static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
+{
+ s8 rx_pwr_all = 0x00;
+ u8 vga_idx, lna_idx;
+
+ lna_idx = (cck_agc_rpt & 0xE0) >> 5;
+ vga_idx = cck_agc_rpt & 0x1F;
+
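+ /*
+ * Rough dBm estimate: each LNA index selects a base power level and
+ * every VGA step adds a further 2 dB of attenuation.
+ */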
+ switch (lna_idx) {
+ case 6:
+ rx_pwr_all = -34 - (2 * vga_idx);
+ break;
+ case 4:
+ rx_pwr_all = -14 - (2 * vga_idx);
+ break;
+ case 1:
+ rx_pwr_all = 6 - (2 * vga_idx);
+ break;
+ case 0:
+ rx_pwr_all = 16 - (2 * vga_idx);
+ break;
+ default:
+ break;
+ }
+
+ return rx_pwr_all;
+}
+
struct rtl8xxxu_fileops rtl8723bu_fops = {
+ .identify_chip = rtl8723bu_identify_chip,
.parse_efuse = rtl8723bu_parse_efuse,
.load_firmware = rtl8723bu_load_firmware,
.power_on = rtl8723bu_power_on,
@@ -1650,11 +1725,13 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.init_phy_bb = rtl8723bu_init_phy_bb,
.init_phy_rf = rtl8723bu_init_phy_rf,
.phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
+ .phy_lc_calibrate = rtl8723a_phy_lc_calibrate,
.phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
.config_channel = rtl8xxxu_gen2_config_channel,
.parse_rx_desc = rtl8xxxu_parse_rxdesc24,
.init_aggregation = rtl8723bu_init_aggregation,
.init_statistics = rtl8723bu_init_statistics,
+ .init_burst = rtl8xxxu_init_burst,
.enable_rf = rtl8723b_enable_rf,
.disable_rf = rtl8xxxu_gen2_disable_rf,
.usb_quirks = rtl8xxxu_gen2_usb_quirks,
@@ -1662,6 +1739,8 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
+ .set_crystal_cap = rtl8723a_set_crystal_cap,
+ .cck_rssi = rtl8723b_cck_rssi,
.writeN_block_size = 1024,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index ac641a56efb0..28f136064297 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -52,6 +52,7 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192eu_nic.bin");
MODULE_FIRMWARE("rtlwifi/rtl8723bu_nic.bin");
MODULE_FIRMWARE("rtlwifi/rtl8723bu_bt.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8188fufw.bin");
module_param_named(debug, rtl8xxxu_debug, int, 0600);
MODULE_PARM_DESC(debug, "Set debug mask");
@@ -127,7 +128,7 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = {
.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
};
-struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
+const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
{0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
{0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
@@ -152,7 +153,7 @@ struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
{0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
};
-static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
+static const struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
{0x810, 0x10001331}, {0x814, 0x020c3d10},
@@ -250,7 +251,7 @@ static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0xffff, 0xffffffff},
};
-static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
+static const struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
{0x800, 0x80040002}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
@@ -348,7 +349,7 @@ static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
{0xffff, 0xffffffff},
};
-static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
+static const struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
{0x040, 0x000c0004}, {0x800, 0x80040000},
{0x804, 0x00000001}, {0x808, 0x0000fc00},
@@ -447,7 +448,7 @@ static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
{0xffff, 0xffffffff},
};
-static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
+static const struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
{0xc78, 0x7b000001}, {0xc78, 0x7b010001},
{0xc78, 0x7b020001}, {0xc78, 0x7b030001},
{0xc78, 0x7b040001}, {0xc78, 0x7b050001},
@@ -531,7 +532,7 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
{0xffff, 0xffffffff}
};
-static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
+static const struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
{0xc78, 0x7b000001}, {0xc78, 0x7b010001},
{0xc78, 0x7b020001}, {0xc78, 0x7b030001},
{0xc78, 0x7b040001}, {0xc78, 0x7b050001},
@@ -615,7 +616,7 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
{0xffff, 0xffffffff}
};
-static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
+static const struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
{ /* RF_A */
.hssiparm1 = REG_FPGA0_XA_HSSI_PARM1,
.hssiparm2 = REG_FPGA0_XA_HSSI_PARM2,
@@ -1573,30 +1574,14 @@ rtl8xxxu_set_spec_sifs(struct rtl8xxxu_priv *priv, u16 cck, u16 ofdm)
static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
- char *cut;
+ char cut = '?';
- switch (priv->chip_cut) {
- case 0:
- cut = "A";
- break;
- case 1:
- cut = "B";
- break;
- case 2:
- cut = "C";
- break;
- case 3:
- cut = "D";
- break;
- case 4:
- cut = "E";
- break;
- default:
- cut = "unknown";
- }
+ /* Currently always true: chip_cut is 4 bits. */
+ if (priv->chip_cut <= 15)
+ cut = 'A' + priv->chip_cut;
dev_info(dev,
- "RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+ "RTL%s rev %c (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
priv->chip_name, cut, priv->chip_vendor, priv->tx_paths,
priv->rx_paths, priv->ep_tx_count, priv->has_wifi,
priv->has_bluetooth, priv->has_gps, priv->hi_pa);
@@ -1604,123 +1589,41 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
}
-static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_identify_vendor_1bit(struct rtl8xxxu_priv *priv, u32 vendor)
{
- struct device *dev = &priv->udev->dev;
- struct ieee80211_hw *hw = priv->hw;
- u32 val32, bonding;
- u16 val16;
-
- val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
- priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
- SYS_CFG_CHIP_VERSION_SHIFT;
- if (val32 & SYS_CFG_TRP_VAUX_EN) {
- dev_info(dev, "Unsupported test chip\n");
- return -ENOTSUPP;
- }
-
- if (val32 & SYS_CFG_BT_FUNC) {
- if (priv->chip_cut >= 3) {
- sprintf(priv->chip_name, "8723BU");
- priv->rtl_chip = RTL8723B;
- } else {
- sprintf(priv->chip_name, "8723AU");
- priv->usb_interrupts = 1;
- priv->rtl_chip = RTL8723A;
- }
-
- priv->rf_paths = 1;
- priv->rx_paths = 1;
- priv->tx_paths = 1;
-
- val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
- if (val32 & MULTI_WIFI_FUNC_EN)
- priv->has_wifi = 1;
- if (val32 & MULTI_BT_FUNC_EN)
- priv->has_bluetooth = 1;
- if (val32 & MULTI_GPS_FUNC_EN)
- priv->has_gps = 1;
- priv->is_multi_func = 1;
- } else if (val32 & SYS_CFG_TYPE_ID) {
- bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
- bonding &= HPON_FSM_BONDING_MASK;
- if (priv->fops->tx_desc_size ==
- sizeof(struct rtl8xxxu_txdesc40)) {
- if (bonding == HPON_FSM_BONDING_1T2R) {
- sprintf(priv->chip_name, "8191EU");
- priv->rf_paths = 2;
- priv->rx_paths = 2;
- priv->tx_paths = 1;
- priv->rtl_chip = RTL8191E;
- } else {
- sprintf(priv->chip_name, "8192EU");
- priv->rf_paths = 2;
- priv->rx_paths = 2;
- priv->tx_paths = 2;
- priv->rtl_chip = RTL8192E;
- }
- } else if (bonding == HPON_FSM_BONDING_1T2R) {
- sprintf(priv->chip_name, "8191CU");
- priv->rf_paths = 2;
- priv->rx_paths = 2;
- priv->tx_paths = 1;
- priv->usb_interrupts = 1;
- priv->rtl_chip = RTL8191C;
- } else {
- sprintf(priv->chip_name, "8192CU");
- priv->rf_paths = 2;
- priv->rx_paths = 2;
- priv->tx_paths = 2;
- priv->usb_interrupts = 0;
- priv->rtl_chip = RTL8192C;
- }
- priv->has_wifi = 1;
+ if (vendor) {
+ strscpy(priv->chip_vendor, "UMC", sizeof(priv->chip_vendor));
+ priv->vendor_umc = 1;
} else {
- sprintf(priv->chip_name, "8188CU");
- priv->rf_paths = 1;
- priv->rx_paths = 1;
- priv->tx_paths = 1;
- priv->rtl_chip = RTL8188C;
- priv->usb_interrupts = 0;
- priv->has_wifi = 1;
+ strscpy(priv->chip_vendor, "TSMC", sizeof(priv->chip_vendor));
}
+}
- hw->wiphy->available_antennas_tx = BIT(priv->tx_paths) - 1;
- hw->wiphy->available_antennas_rx = BIT(priv->rx_paths) - 1;
-
- switch (priv->rtl_chip) {
- case RTL8188E:
- case RTL8192E:
- case RTL8723B:
- switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
- case SYS_CFG_VENDOR_ID_TSMC:
- sprintf(priv->chip_vendor, "TSMC");
- break;
- case SYS_CFG_VENDOR_ID_SMIC:
- sprintf(priv->chip_vendor, "SMIC");
- priv->vendor_smic = 1;
- break;
- case SYS_CFG_VENDOR_ID_UMC:
- sprintf(priv->chip_vendor, "UMC");
- priv->vendor_umc = 1;
- break;
- default:
- sprintf(priv->chip_vendor, "unknown");
- }
+void rtl8xxxu_identify_vendor_2bits(struct rtl8xxxu_priv *priv, u32 vendor)
+{
+ switch (vendor) {
+ case SYS_CFG_VENDOR_ID_TSMC:
+ strscpy(priv->chip_vendor, "TSMC", sizeof(priv->chip_vendor));
+ break;
+ case SYS_CFG_VENDOR_ID_SMIC:
+ strscpy(priv->chip_vendor, "SMIC", sizeof(priv->chip_vendor));
+ priv->vendor_smic = 1;
+ break;
+ case SYS_CFG_VENDOR_ID_UMC:
+ strscpy(priv->chip_vendor, "UMC", sizeof(priv->chip_vendor));
+ priv->vendor_umc = 1;
break;
default:
- if (val32 & SYS_CFG_VENDOR_ID) {
- sprintf(priv->chip_vendor, "UMC");
- priv->vendor_umc = 1;
- } else {
- sprintf(priv->chip_vendor, "TSMC");
- }
+ strscpy(priv->chip_vendor, "unknown", sizeof(priv->chip_vendor));
}
+}
- val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
- priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28;
+void rtl8xxxu_config_endpoints_sie(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
val16 = rtl8xxxu_read16(priv, REG_NORMAL_SIE_EP_TX);
+
if (val16 & NORMAL_SIE_EP_TX_HIGH_MASK) {
priv->ep_tx_high_queue = 1;
priv->ep_tx_count++;
@@ -1735,35 +1638,35 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->ep_tx_low_queue = 1;
priv->ep_tx_count++;
}
+}
- /*
- * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
- */
- if (!priv->ep_tx_count) {
- switch (priv->nr_out_eps) {
- case 4:
- case 3:
- priv->ep_tx_low_queue = 1;
- priv->ep_tx_count++;
- fallthrough;
- case 2:
- priv->ep_tx_normal_queue = 1;
- priv->ep_tx_count++;
- fallthrough;
- case 1:
- priv->ep_tx_high_queue = 1;
- priv->ep_tx_count++;
- break;
- default:
- dev_info(dev, "Unsupported USB TX end-points\n");
- return -ENOTSUPP;
- }
+int rtl8xxxu_config_endpoints_no_sie(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+
+ switch (priv->nr_out_eps) {
+ case 4:
+ case 3:
+ priv->ep_tx_low_queue = 1;
+ priv->ep_tx_count++;
+ fallthrough;
+ case 2:
+ priv->ep_tx_normal_queue = 1;
+ priv->ep_tx_count++;
+ fallthrough;
+ case 1:
+ priv->ep_tx_high_queue = 1;
+ priv->ep_tx_count++;
+ break;
+ default:
+ dev_info(dev, "Unsupported USB TX end-points\n");
+ return -ENOTSUPP;
}
return 0;
}
-static int
+int
rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
{
int i;
@@ -1979,7 +1882,7 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
/*
* Init H2C command
*/
- if (priv->rtl_chip == RTL8723B)
+ if (priv->rtl_chip == RTL8723B || priv->rtl_chip == RTL8188F)
rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
exit:
return ret;
@@ -2004,7 +1907,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
if (val8 & MCU_FW_RAM_SEL) {
- pr_info("do the RAM reset\n");
+ dev_info(&priv->udev->dev,
+ "Firmware is already running, resetting the MCU.\n");
rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
priv->fops->reset_8051(priv);
}
@@ -2099,6 +2003,7 @@ int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
case 0x88c0:
case 0x5300:
case 0x2300:
+ case 0x88f0:
break;
default:
ret = -EINVAL;
@@ -2145,7 +2050,7 @@ void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
static int
rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
{
- struct rtl8xxxu_reg8val *array = priv->fops->mactable;
+ const struct rtl8xxxu_reg8val *array = priv->fops->mactable;
int i, ret;
u16 reg;
u8 val;
@@ -2166,14 +2071,16 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
}
}
- if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
+ if (priv->rtl_chip != RTL8723B &&
+ priv->rtl_chip != RTL8192E &&
+ priv->rtl_chip != RTL8188F)
rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
return 0;
}
int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_reg32val *array)
+ const struct rtl8xxxu_reg32val *array)
{
int i, ret;
u16 reg;
@@ -2256,7 +2163,6 @@ void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv)
*/
static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
{
- u8 val8;
u32 val32;
priv->fops->init_phy_bb(priv);
@@ -2321,15 +2227,8 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
}
- if (priv->has_xtalk) {
- val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
-
- val8 = priv->xtalk;
- val32 &= 0xff000fff;
- val32 |= ((val8 | (val8 << 6)) << 12);
-
- rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
- }
+ if (priv->fops->set_crystal_cap)
+ priv->fops->set_crystal_cap(priv, priv->default_crystal_cap);
if (priv->rtl_chip == RTL8192E)
rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x000f81fb);
@@ -2338,7 +2237,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
}
static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_rfregval *array,
+ const struct rtl8xxxu_rfregval *array,
enum rtl8xxxu_rfpath path)
{
int i, ret;
@@ -2386,7 +2285,7 @@ static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
}
int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_rfregval *table,
+ const struct rtl8xxxu_rfregval *table,
enum rtl8xxxu_rfpath path)
{
u32 val32;
@@ -3427,7 +3326,7 @@ void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
}
-static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
+void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
{
u32 val32;
u32 rf_amode, rf_bmode = 0, lstf;
@@ -3879,6 +3778,52 @@ static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_RQPN, val32);
}
+void rtl8xxxu_init_burst(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /*
+ * For USB high speed set 512B packets
+ */
+ val8 = rtl8xxxu_read8(priv, REG_RXDMA_PRO_8723B);
+ u8p_replace_bits(&val8, 1, RXDMA_PRO_DMA_BURST_SIZE);
+ u8p_replace_bits(&val8, 3, RXDMA_PRO_DMA_BURST_CNT);
+ val8 |= RXDMA_PRO_DMA_MODE;
+ rtl8xxxu_write8(priv, REG_RXDMA_PRO_8723B, val8);
+
+ /*
+ * Enable single packet AMPDU
+ */
+ val8 = rtl8xxxu_read8(priv, REG_HT_SINGLE_AMPDU_8723B);
+ val8 |= HT_SINGLE_AMPDU_ENABLE;
+ rtl8xxxu_write8(priv, REG_HT_SINGLE_AMPDU_8723B, val8);
+
+ rtl8xxxu_write16(priv, REG_MAX_AGGR_NUM, 0x0c14);
+ if (priv->rtl_chip == RTL8723B)
+ val8 = 0x5e;
+ else if (priv->rtl_chip == RTL8188F)
+ val8 = 0x70; /* 0x5e would make it very slow */
+ rtl8xxxu_write8(priv, REG_AMPDU_MAX_TIME_8723B, val8);
+ rtl8xxxu_write32(priv, REG_AGGLEN_LMT, 0xffffffff);
+ rtl8xxxu_write8(priv, REG_RX_PKT_LIMIT, 0x18);
+ rtl8xxxu_write8(priv, REG_PIFS, 0x00);
+ if (priv->rtl_chip == RTL8188F) {
+ rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL, FWHW_TXQ_CTRL_AMPDU_RETRY);
+ rtl8xxxu_write32(priv, REG_FAST_EDCA_CTRL, 0x03086666);
+ }
+ if (priv->rtl_chip == RTL8723B)
+ val8 = 0x50;
+ else if (priv->rtl_chip == RTL8188F)
+ val8 = 0x28; /* 0x50 would make the upload slow */
+ rtl8xxxu_write8(priv, REG_USTIME_TSF_8723B, val8);
+ rtl8xxxu_write8(priv, REG_USTIME_EDCA, val8);
+
+ /* Prevent the MAC from being reset by the bus. */
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 |= RSV_CTRL_WLOCK_1C | RSV_CTRL_DIS_PRST;
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+}
+
static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -4031,6 +3976,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (priv->rtl_chip == RTL8192E) {
rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
+ } else if (priv->rtl_chip == RTL8188F) {
+ rtl8xxxu_write32(priv, REG_HISR0, 0xffffffff);
+ rtl8xxxu_write32(priv, REG_HISR1, 0xffffffff);
} else {
/*
* Enable all interrupts - not obvious USB needs to do this
@@ -4050,11 +3998,25 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
rtl8xxxu_write32(priv, REG_RCR, val32);
- /*
- * Accept all multicast
- */
- rtl8xxxu_write32(priv, REG_MAR, 0xffffffff);
- rtl8xxxu_write32(priv, REG_MAR + 4, 0xffffffff);
+ if (priv->rtl_chip == RTL8188F) {
+ /* Accept all data frames */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
+
+ /*
+ * Since ADF is removed from RCR, ps-poll will not be indicated to the
+ * driver, so RXFLTMAP1 must pass ps-poll to guarantee that AP mode can
+ * receive it.
+ */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP1, 0x400);
+
+ /* Accept all management frames */
+ rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
+ } else {
+ /*
+ * Accept all multicast
+ */
+ rtl8xxxu_write32(priv, REG_MAR, 0xffffffff);
+ rtl8xxxu_write32(priv, REG_MAR + 4, 0xffffffff);
+ }
/*
* Init adaptive controls
@@ -4105,46 +4067,28 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val16 = BEACON_DISABLE_TSF_UPDATE | (BEACON_DISABLE_TSF_UPDATE << 8);
rtl8xxxu_write16(priv, REG_BEACON_CTRL, val16);
rtl8xxxu_write16(priv, REG_TBTT_PROHIBIT, 0x6404);
- rtl8xxxu_write8(priv, REG_DRIVER_EARLY_INT, DRIVER_EARLY_INT_TIME);
+ if (priv->rtl_chip != RTL8188F)
+ /* Firmware controls REG_DRVERLYINT when power saving is enabled, */
+ /* so don't set this register in STA mode. */
+ rtl8xxxu_write8(priv, REG_DRIVER_EARLY_INT, DRIVER_EARLY_INT_TIME);
rtl8xxxu_write8(priv, REG_BEACON_DMA_TIME, BEACON_DMA_ATIME_INT_TIME);
rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F);
/*
* Initialize burst parameters
*/
- if (priv->rtl_chip == RTL8723B) {
- /*
- * For USB high speed set 512B packets
- */
- val8 = rtl8xxxu_read8(priv, REG_RXDMA_PRO_8723B);
- val8 &= ~(BIT(4) | BIT(5));
- val8 |= BIT(4);
- val8 |= BIT(1) | BIT(2) | BIT(3);
- rtl8xxxu_write8(priv, REG_RXDMA_PRO_8723B, val8);
-
- /*
- * For USB high speed set 512B packets
- */
- val8 = rtl8xxxu_read8(priv, REG_HT_SINGLE_AMPDU_8723B);
- val8 |= BIT(7);
- rtl8xxxu_write8(priv, REG_HT_SINGLE_AMPDU_8723B, val8);
-
- rtl8xxxu_write16(priv, REG_MAX_AGGR_NUM, 0x0c14);
- rtl8xxxu_write8(priv, REG_AMPDU_MAX_TIME_8723B, 0x5e);
- rtl8xxxu_write32(priv, REG_AGGLEN_LMT, 0xffffffff);
- rtl8xxxu_write8(priv, REG_RX_PKT_LIMIT, 0x18);
- rtl8xxxu_write8(priv, REG_PIFS, 0x00);
- rtl8xxxu_write8(priv, REG_USTIME_TSF_8723B, 0x50);
- rtl8xxxu_write8(priv, REG_USTIME_EDCA, 0x50);
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
- val8 |= BIT(5) | BIT(6);
- rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
- }
+ if (priv->fops->init_burst)
+ priv->fops->init_burst(priv);
if (fops->init_aggregation)
fops->init_aggregation(priv);
+ if (priv->rtl_chip == RTL8188F) {
+ rtl8xxxu_write16(priv, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+ rtl8xxxu_write16(priv, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+ }
+
/*
* Enable CCK and OFDM block
*/
@@ -4163,7 +4107,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
fops->set_tx_power(priv, 1, false);
/* Let the 8051 take control of antenna setting */
- if (priv->rtl_chip != RTL8192E) {
+ if (priv->rtl_chip != RTL8192E && priv->rtl_chip != RTL8188F) {
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
val8 |= LEDCFG2_DPDT_SELECT;
rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
@@ -4174,7 +4118,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* Disable BAR - not sure if this has any effect on USB */
rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff);
- rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
+ if (priv->rtl_chip != RTL8188F)
+ rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
if (fops->init_statistics)
fops->init_statistics(priv);
@@ -4191,20 +4136,38 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* Reset USB mode switch setting
*/
rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00);
+ } else if (priv->rtl_chip == RTL8188F) {
+ /*
+ * Init GPIO settings for 8188f
+ */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+ val8 &= ~GPIO_MUXCFG_IO_SEL_ENBT;
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
}
- rtl8723a_phy_lc_calibrate(priv);
+ if (priv->rtl_chip == RTL8188F)
+ /* CCK PD */
+ rtl8xxxu_write8(priv, REG_CCK_PD_THRESH, CCK_PD_TYPE1_LV1_TH);
+
+ fops->phy_lc_calibrate(priv);
fops->phy_iq_calibrate(priv);
/*
* This should enable thermal meter
*/
- if (fops->gen2_thermal_meter)
- rtl8xxxu_write_rfreg(priv,
- RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
- else
+ if (fops->gen2_thermal_meter) {
+ if (priv->rtl_chip == RTL8188F) {
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_T_METER_8723B);
+ val32 |= 0x30000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER_8723B, val32);
+ } else {
+ rtl8xxxu_write_rfreg(priv,
+ RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
+ }
+ } else {
rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60);
+ }
/* Set NAV_UPPER to 30000us */
val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
@@ -4239,7 +4202,39 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val32 &= 0xfff00fff;
val32 |= 0x0007e000;
rtl8xxxu_write32(priv, REG_AFE_MISC, val32);
+
+ /*
+ * 0x824[9], 0x82C[9] and 0xA80[7] must hold the same value,
+ * otherwise the CCK RSSI report may be incorrect.
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+ priv->cck_agc_report_type = val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_HSSI_PARM2);
+ if (priv->cck_agc_report_type != (bool)(val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)) {
+ if (priv->cck_agc_report_type)
+ val32 |= FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
+ else
+ val32 &= ~FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM2, val32);
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_AGC_RPT);
+ if (priv->cck_agc_report_type)
+ val32 |= AGC_RPT_CCK;
+ else
+ val32 &= ~AGC_RPT_CCK;
+ rtl8xxxu_write32(priv, REG_AGC_RPT, val32);
+ }
+
+ /* Initialise the center frequency offset tracking */
+ if (priv->fops->set_crystal_cap) {
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_CFO_TRACKING);
+ priv->cfo_tracking.atc_status = val32 & CFO_TRACKING_ATC_STATUS;
+ priv->cfo_tracking.adjust = true;
+ priv->cfo_tracking.crystal_cap = priv->default_crystal_cap;
}
+
exit:
return ret;
}
@@ -4389,12 +4384,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect)
{
-#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
/*
- * Barry Day reports this causes issues with 8192eu and 8723bu
- * devices reconnecting. The reason for this is unclear, but
- * until it is better understood, leave the code in place but
- * disabled, so it is not lost.
+ * The firmware turns on the rate control when it knows it's
+ * connected to a network.
*/
struct h2c_cmd h2c;
@@ -4407,7 +4399,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
h2c.media_status_rpt.parm &= ~BIT(0);
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
-#endif
}
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
@@ -4654,7 +4645,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
- rcu_read_unlock();
highest_rate = fls(ramask) - 1;
if (highest_rate < DESC_RATE_MCS0) {
@@ -4679,6 +4669,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
else
rarpt->txrate.bw = RATE_INFO_BW_20;
}
+ rcu_read_unlock();
bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
rarpt->bit_rate = bit_rate;
rarpt->desc_rate = highest_rate;
@@ -5211,7 +5202,8 @@ error:
static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
struct ieee80211_rx_status *rx_status,
struct rtl8723au_phy_stats *phy_stats,
- u32 rxmcs)
+ u32 rxmcs, struct ieee80211_hdr *hdr,
+ bool crc_icv_err)
{
if (phy_stats->sgi_en)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -5222,21 +5214,23 @@ static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
*/
u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a;
- switch (cck_agc_rpt & 0xc0) {
- case 0xc0:
- rx_status->signal = -46 - (cck_agc_rpt & 0x3e);
- break;
- case 0x80:
- rx_status->signal = -26 - (cck_agc_rpt & 0x3e);
- break;
- case 0x40:
- rx_status->signal = -12 - (cck_agc_rpt & 0x3e);
- break;
- case 0x00:
- rx_status->signal = 16 - (cck_agc_rpt & 0x3e);
- break;
- }
+ rx_status->signal = priv->fops->cck_rssi(priv, cck_agc_rpt);
} else {
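+ /* Feed CFO tracking only with error-free frames from the associated AP. */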
+ bool parse_cfo = priv->fops->set_crystal_cap &&
+ priv->vif &&
+ priv->vif->type == NL80211_IFTYPE_STATION &&
+ priv->vif->cfg.assoc &&
+ !crc_icv_err &&
+ !ieee80211_is_ctl(hdr->frame_control) &&
+ ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+
+ if (parse_cfo) {
+ priv->cfo_tracking.cfo_tail[0] = phy_stats->path_cfotail[0];
+ priv->cfo_tracking.cfo_tail[1] = phy_stats->path_cfotail[1];
+
+ priv->cfo_tracking.packet_count++;
+ }
+
rx_status->signal =
(phy_stats->cck_sig_qual_ofdm_pwdb_all >> 1) - 110;
}
@@ -5319,7 +5313,8 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
rtl8xxxu_queue_rx_urb(priv, rx_urb);
break;
default:
- pr_info("failed to requeue urb %i\n", ret);
+ dev_warn(&priv->udev->dev,
+ "failed to requeue urb with error %i\n", ret);
skb = (struct sk_buff *)rx_urb->urb.context;
dev_kfree_skb(skb);
usb_free_urb(&rx_urb->urb);
@@ -5719,7 +5714,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
if (rx_desc->phy_stats)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
- rx_desc->rxmcs);
+ rx_desc->rxmcs, (struct ieee80211_hdr *)skb->data,
+ rx_desc->crc32 || rx_desc->icverr);
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -5790,7 +5786,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
if (rx_desc->phy_stats)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
- rx_desc->rxmcs);
+ rx_desc->rxmcs, (struct ieee80211_hdr *)skb->data,
+ rx_desc->crc32 || rx_desc->icverr);
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6405,6 +6402,94 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
}
}
+static void rtl8xxxu_set_atc_status(struct rtl8xxxu_priv *priv, bool atc_status)
+{
+ struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking;
+ u32 val32;
+
+ if (atc_status == cfo->atc_status)
+ return;
+
+ cfo->atc_status = atc_status;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_CFO_TRACKING);
+ if (atc_status)
+ val32 |= CFO_TRACKING_ATC_STATUS;
+ else
+ val32 &= ~CFO_TRACKING_ATC_STATUS;
+ rtl8xxxu_write32(priv, REG_OFDM1_CFO_TRACKING, val32);
+}
+
+/* Central frequency offset correction */
+static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking;
+ int cfo_khz_a, cfo_khz_b, cfo_average;
+ int crystal_cap;
+
+ if (!priv->vif || !priv->vif->cfg.assoc) {
+ /* Not associated: step the crystal cap back towards its default. */
+ cfo->adjust = true;
+
+ if (cfo->crystal_cap > priv->default_crystal_cap)
+ priv->fops->set_crystal_cap(priv, cfo->crystal_cap - 1);
+ else if (cfo->crystal_cap < priv->default_crystal_cap)
+ priv->fops->set_crystal_cap(priv, cfo->crystal_cap + 1);
+
+ rtl8xxxu_set_atc_status(priv, true);
+
+ return;
+ }
+
+ if (cfo->packet_count == cfo->packet_count_pre)
+ /* No new information. */
+ return;
+
+ cfo->packet_count_pre = cfo->packet_count;
+
+ /* CFO_tail[1:0] is S(8,7): CFO(kHz) = (cfo_tail * 312.5) / 128 */
+ cfo_khz_a = (int)((cfo->cfo_tail[0] * 3125) / 10) >> 7;
+ cfo_khz_b = (int)((cfo->cfo_tail[1] * 3125) / 10) >> 7;
+
+ if (priv->tx_paths == 1)
+ cfo_average = cfo_khz_a;
+ else
+ cfo_average = (cfo_khz_a + cfo_khz_b) / 2;
+
+ dev_dbg(&priv->udev->dev, "cfo_average: %d\n", cfo_average);
+
+ if (cfo->adjust) {
+ if (abs(cfo_average) < CFO_TH_XTAL_LOW)
+ cfo->adjust = false;
+ } else {
+ if (abs(cfo_average) > CFO_TH_XTAL_HIGH)
+ cfo->adjust = true;
+ }
+
+ /*
+ * TODO: We should return here only if bluetooth is enabled.
+ * See the vendor drivers for how to determine that.
+ */
+ if (priv->has_bluetooth)
+ return;
+
+ if (!cfo->adjust)
+ return;
+
+ crystal_cap = cfo->crystal_cap;
+
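+ /* Nudge the crystal cap one step per watchdog pass to pull the CFO back towards zero. */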
+ if (cfo_average > CFO_TH_XTAL_LOW)
+ crystal_cap++;
+ else if (cfo_average < -CFO_TH_XTAL_LOW)
+ crystal_cap--;
+
+ crystal_cap = clamp(crystal_cap, 0, 0x3f);
+
+ priv->fops->set_crystal_cap(priv, crystal_cap);
+
+ rtl8xxxu_set_atc_status(priv, abs(cfo_average) >= CFO_TH_ATC);
+}
+
static void rtl8xxxu_watchdog_callback(struct work_struct *work)
{
struct ieee80211_vif *vif;
@@ -6429,6 +6514,10 @@ static void rtl8xxxu_watchdog_callback(struct work_struct *work)
rcu_read_unlock();
signal = ieee80211_ave_rssi(vif);
+
+ if (priv->fops->set_crystal_cap)
+ rtl8xxxu_track_cfo(priv);
+
rtl8xxxu_refresh_rate_mask(priv, signal, sta);
}
@@ -6561,6 +6650,7 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
static const struct ieee80211_ops rtl8xxxu_ops = {
.tx = rtl8xxxu_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = rtl8xxxu_add_interface,
.remove_interface = rtl8xxxu_remove_interface,
.config = rtl8xxxu_config,
@@ -6674,6 +6764,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
case 0x8178:
case 0x817f:
case 0x818b:
+ case 0xf179:
untested = 0;
break;
}
@@ -6738,12 +6829,15 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret)
goto err_set_intfdata;
- ret = rtl8xxxu_identify_chip(priv);
+ ret = priv->fops->identify_chip(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to identify chip\n");
goto err_set_intfdata;
}
+ hw->wiphy->available_antennas_tx = BIT(priv->tx_paths) - 1;
+ hw->wiphy->available_antennas_rx = BIT(priv->rx_paths) - 1;
+
ret = rtl8xxxu_read_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
@@ -6808,6 +6902,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
* The firmware handles rate control
*/
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
@@ -6886,6 +6981,9 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8723bu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa611, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723bu_fops},
+/* RTL8188FU */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xf179, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188fu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 438b65ba9640..3e79efdfb4c2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -68,6 +68,8 @@
#define REG_SPS_OCP_CFG 0x0018
#define REG_8192E_LDOV12_CTRL 0x0014
#define REG_RSV_CTRL 0x001c
+#define RSV_CTRL_WLOCK_1C BIT(5)
+#define RSV_CTRL_DIS_PRST BIT(6)
#define REG_RF_CTRL 0x001f
#define RF_ENABLE BIT(0)
@@ -135,6 +137,7 @@
#define REG_CAL_TIMER 0x003c
#define REG_ACLK_MON 0x003e
#define REG_GPIO_MUXCFG 0x0040
+#define GPIO_MUXCFG_IO_SEL_ENBT BIT(5)
#define REG_GPIO_IO_SEL 0x0042
#define REG_MAC_PINMUX_CFG 0x0043
#define REG_GPIO_PIN_CTRL 0x0044
@@ -312,7 +315,6 @@
#define SYS_CFG_SPS_SEL BIT(24) /* 1:LDO regulator mode;
0:Switching regulator mode*/
#define SYS_CFG_CHIP_VERSION_MASK 0xf000 /* Bit 12 - 15 */
-#define SYS_CFG_CHIP_VERSION_SHIFT 12
#define REG_GPIO_OUTSTS 0x00f4 /* For RTL8723 only. */
#define GPIO_EFS_HCI_SEL (BIT(0) | BIT(1))
@@ -391,6 +393,7 @@
#define REG_CPWM 0x012f
#define REG_FWIMR 0x0130
#define REG_FWISR 0x0134
+#define REG_FTIMR 0x0138
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_PKTBUF_DBG_DATA_L 0x0144
#define REG_PKTBUF_DBG_DATA_H 0x0148
@@ -440,6 +443,9 @@
#define REG_FIFOPAGE 0x0204
#define REG_TDECTRL 0x0208
+
+#define REG_DWBCN0_CTRL_8188F REG_TDECTRL
+
#define REG_TXDMA_OFFSET_CHK 0x020c
#define TXDMA_OFFSET_DROP_DATA_EN BIT(9)
#define REG_TXDMA_STATUS 0x0210
@@ -467,6 +473,9 @@
/* Presumably only found on newer chips such as 8723bu */
#define REG_RX_DMA_CTRL_8723B 0x0286
#define REG_RXDMA_PRO_8723B 0x0290
+#define RXDMA_PRO_DMA_MODE BIT(1) /* Set to 0x1. */
+#define RXDMA_PRO_DMA_BURST_CNT GENMASK(3, 2) /* Set to 0x3. */
+#define RXDMA_PRO_DMA_BURST_SIZE GENMASK(5, 4) /* Set to 0x1. */
#define REG_RF_BB_CMD_ADDR 0x02c0
#define REG_RF_BB_CMD_DATA 0x02c4
@@ -572,6 +581,7 @@
#define REG_STBC_SETTING 0x04c4
#define REG_QUEUE_CTRL 0x04c6
#define REG_HT_SINGLE_AMPDU_8723B 0x04c7
+#define HT_SINGLE_AMPDU_ENABLE BIT(7)
#define REG_PROT_MODE_CTRL 0x04c8
#define REG_MAX_AGGR_NUM 0x04ca
#define REG_RTS_MAX_AGGR_NUM 0x04cb
@@ -925,6 +935,7 @@
#define REG_FPGA0_XA_LSSI_READBACK 0x08a0 /* Tranceiver LSSI Readback */
#define REG_FPGA0_XB_LSSI_READBACK 0x08a4
+#define REG_FPGA0_PSD_REPORT 0x08b4
#define REG_HSPI_XA_READBACK 0x08b8 /* Transceiver A HSPI read */
#define REG_HSPI_XB_READBACK 0x08bc /* Transceiver B HSPI read */
@@ -936,6 +947,7 @@
#define REG_RFE_PATH_SELECT 0x0940 /* 8723BU */
#define REG_RFE_BUFFER 0x0944 /* 8723BU */
#define REG_S0S1_PATH_SWITCH 0x0948 /* 8723BU */
+#define REG_OFDM_RX_DFIR 0x954
#define REG_CCK0_SYSTEM 0x0a00
#define CCK0_SIDEBAND BIT(4)
@@ -946,6 +958,16 @@
#define CCK0_AFE_RX_ANT_A 0
#define CCK0_AFE_RX_ANT_B (BIT(24) | BIT(26))
+#define REG_CCK_PD_THRESH 0x0a0a
+#define CCK_PD_TYPE1_LV0_TH 0x40
+#define CCK_PD_TYPE1_LV1_TH 0x83
+#define CCK_PD_TYPE1_LV2_TH 0xcd
+#define CCK_PD_TYPE1_LV3_TH 0xdd
+#define CCK_PD_TYPE1_LV4_TH 0xed
+
+#define REG_AGC_RPT 0xa80
+#define AGC_RPT_CCK BIT(7)
+
#define REG_CONFIG_ANT_A 0x0b68
#define REG_CONFIG_ANT_B 0x0b6c
@@ -965,6 +987,7 @@
#define REG_OFDM0_FA_RSTC 0x0c0c
+#define REG_OFDM0_XA_RX_AFE 0x0c10
#define REG_OFDM0_XA_RX_IQ_IMBALANCE 0x0c14
#define REG_OFDM0_XB_RX_IQ_IMBALANCE 0x0c1c
@@ -1011,6 +1034,10 @@
#define OFDM_LSTF_MASK 0x70000000
#define REG_OFDM1_TRX_PATH_ENABLE 0x0d04
+#define REG_OFDM1_CFO_TRACKING 0x0d2c
+#define CFO_TRACKING_ATC_STATUS BIT(11)
+#define REG_OFDM1_CSI_FIX_MASK1 0x0d40
+#define REG_OFDM1_CSI_FIX_MASK2 0x0d44
#define REG_TX_AGC_A_RATE18_06 0x0e00
#define REG_TX_AGC_A_RATE54_24 0x0e04
@@ -1202,6 +1229,7 @@
#define RF6052_REG_UNKNOWN_43 0x43
#define RF6052_REG_UNKNOWN_55 0x55
#define RF6052_REG_UNKNOWN_56 0x56
+#define RF6052_REG_RXG_MIX_SWBW 0x87
#define RF6052_REG_S0S1 0xb0
#define RF6052_REG_UNKNOWN_DF 0xdf
#define RF6052_REG_UNKNOWN_ED 0xed
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index ca01270944fe..6f10727cdb94 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1912,6 +1912,7 @@ const struct ieee80211_ops rtl_ops = {
.start = rtl_op_start,
.stop = rtl_op_stop,
.tx = rtl_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = rtl_op_add_interface,
.remove_interface = rtl_op_remove_interface,
.change_interface = rtl_op_change_interface,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 8043d819fb85..a182cdeb58e2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -997,7 +997,6 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u16 read_point, write_point;
bool ret = false;
- static u8 stop_report_cnt;
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
{
@@ -1038,13 +1037,6 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
rtlpriv->psc.rfoff_reason > RF_CHANGE_BY_PS)
ret = true;
- if (hw_queue < BEACON_QUEUE) {
- if (!ret)
- stop_report_cnt++;
- else
- stop_report_cnt = 0;
- }
-
return ret;
}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 0b5f903c0f36..5e760c884f89 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -311,10 +311,10 @@ EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
u8 *h2c)
{
+ struct rtw_h2c_cmd *h2c_cmd = (struct rtw_h2c_cmd *)h2c;
u8 box;
u8 box_state;
u32 box_reg, box_ex_reg;
- int idx;
int ret;
rtw_dbg(rtwdev, RTW_DBG_FW,
@@ -356,10 +356,8 @@ static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
goto out;
}
- for (idx = 0; idx < 4; idx++)
- rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
- for (idx = 0; idx < 4; idx++)
- rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);
+ rtw_write32(rtwdev, box_ex_reg, le32_to_cpu(h2c_cmd->msg_ext));
+ rtw_write32(rtwdev, box_reg, le32_to_cpu(h2c_cmd->msg));
if (++rtwdev->h2c.last_box_num >= 4)
rtwdev->h2c.last_box_num = 0;
@@ -824,6 +822,16 @@ void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_set_recover_bt_device(struct rtw_dev *rtwdev)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RECOVER_BT_DEV);
+ SET_RECOVER_BT_DEV_EN(h2c_pkt, 1);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index a5a965803a3c..0a386e6d6e0d 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -81,6 +81,11 @@ struct rtw_c2h_adaptivity {
u8 option;
} __packed;
+struct rtw_h2c_cmd {
+ __le32 msg;
+ __le32 msg_ext;
+} __packed;
+
enum rtw_rsvd_packet_type {
RSVD_BEACON,
RSVD_DUMMY,
@@ -550,6 +555,8 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_AOAC_GLOBAL_INFO 0x82
#define H2C_CMD_NLO_INFO 0x8C
+#define H2C_CMD_RECOVER_BT_DEV 0xD1
+
#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
@@ -749,6 +756,9 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_RECOVER_BT_DEV_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+
#define GET_FW_DUMP_LEN(_header) \
le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(15, 0))
#define GET_FW_DUMP_SEQ(_header) \
@@ -838,6 +848,7 @@ void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
u8 group_key_enc);
void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_recover_bt_device(struct rtw_dev *rtwdev);
void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
struct cfg80211_ssid *ssid);
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 52076e89d59a..c7e64f7036ac 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -906,7 +906,8 @@ out:
return ret;
}
-int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+static
+int _rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
if (rtw_chip_wcpu_11n(rtwdev))
return __rtw_download_firmware_legacy(rtwdev, fw);
@@ -914,6 +915,21 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
return __rtw_download_firmware(rtwdev, fw);
}
+int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+{
+ int ret;
+
+ ret = _rtw_download_firmware(rtwdev, fw);
+ if (ret)
+ return ret;
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_fw_set_recover_bt_device(rtwdev);
+
+ return 0;
+}
+
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 93e09400aac4..2b20cf8bbf3a 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -19,6 +19,9 @@ config RTW89_PCI
config RTW89_8852A
tristate
+config RTW89_8852B
+ tristate
+
config RTW89_8852C
tristate
@@ -33,6 +36,17 @@ config RTW89_8852AE
802.11ax PCIe wireless network (Wi-Fi 6) adapter
+config RTW89_8852BE
+ tristate "Realtek 8852BE PCI wireless network (Wi-Fi 6) adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ select RTW89_8852B
+ help
+ Select this option will enable support for 8852BE chipset
+
+ 802.11ax PCIe wireless network (Wi-Fi 6) adapter
+
config RTW89_8852CE
tristate "Realtek 8852CE PCI wireless network (Wi-Fi 6E) adapter"
depends on PCI
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index a87f2aff4def..2dc48fa10c6b 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -15,6 +15,8 @@ rtw89_core-y += core.o \
chan.o \
ser.o
+rtw89_core-$(CONFIG_PM) += wow.o
+
obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
rtw89_8852a-objs := rtw8852a.o \
rtw8852a_table.o \
@@ -24,6 +26,15 @@ rtw89_8852a-objs := rtw8852a.o \
obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
rtw89_8852ae-objs := rtw8852ae.o
+obj-$(CONFIG_RTW89_8852B) += rtw89_8852b.o
+rtw89_8852b-objs := rtw8852b.o \
+ rtw8852b_table.o \
+ rtw8852b_rfk.o \
+ rtw8852b_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852BE) += rtw89_8852be.o
+rtw89_8852be-objs := rtw8852be.o
+
obj-$(CONFIG_RTW89_8852C) += rtw89_8852c.o
rtw89_8852c-objs := rtw8852c.o \
rtw8852c_table.o \
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index a4f61c2f6512..90596806bc93 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -4,6 +4,7 @@
#include "chan.h"
#include "debug.h"
+#include "util.h"
static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band,
u8 center_chan)
@@ -108,8 +109,8 @@ bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
const struct rtw89_chan *new)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_chan *chan = &hal->chan[idx];
- struct rtw89_chan_rcd *rcd = &hal->chan_rcd[idx];
+ struct rtw89_chan *chan = &hal->sub[idx].chan;
+ struct rtw89_chan_rcd *rcd = &hal->sub[idx].rcd;
bool band_changed;
rcd->prev_primary_channel = chan->primary_channel;
@@ -127,7 +128,7 @@ static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
- hal->chandef[idx] = *chandef;
+ hal->sub[idx].chandef = *chandef;
if (from_stack)
set_bit(idx, hal->entity_map);
@@ -195,6 +196,7 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
rtw89_set_channel(rtwdev);
cfg->idx = idx;
+ hal->sub[idx].cfg = cfg;
return 0;
}
@@ -203,8 +205,34 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_vif *rtwvif;
+ u8 drop, roll;
- clear_bit(cfg->idx, hal->entity_map);
+ drop = cfg->idx;
+ if (drop != RTW89_SUB_ENTITY_0)
+ goto out;
+
+ roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY, drop + 1);
+
+ /* If nothing else remains, rtw89_entity_recalc() will fall back to rtw89_config_default_chandef(). */
+ if (roll == NUM_OF_RTW89_SUB_ENTITY)
+ goto out;
+
+ /* RTW89_SUB_ENTITY_0 is about to be released while another entity exists.
+ * Roll that other entity down to RTW89_SUB_ENTITY_0 to take its place.
+ */
+ hal->sub[roll].cfg->idx = RTW89_SUB_ENTITY_0;
+ hal->sub[RTW89_SUB_ENTITY_0] = hal->sub[roll];
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (rtwvif->sub_entity_idx == roll)
+ rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
+ }
+
+ drop = roll;
+
+out:
+ clear_bit(drop, hal->entity_map);
rtw89_set_channel(rtwdev);
}
@@ -225,6 +253,9 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct ieee80211_chanctx_conf *ctx)
{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+
+ rtwvif->sub_entity_idx = cfg->idx;
return 0;
}
@@ -232,4 +263,5 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct ieee80211_chanctx_conf *ctx)
{
+ rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index bbdfa9ac203c..f21c73310fdb 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -1809,13 +1809,18 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
struct rtw89_btc_rf_trx_para para;
u32 wl_stb_chg = 0;
u8 level_id = 0;
if (!dm->freerun) {
- dm->trx_para_level = 0;
- chip->ops->btc_bt_aci_imp(rtwdev);
+ /* fix LNA2 = level-5 for BT ACI issue at BTG */
+ if ((btc->dm.wl_btg_rx && b->profile_cnt.now != 0) ||
+ dm->bt_only == 1)
+ dm->trx_para_level = 1;
+ else
+ dm->trx_para_level = 0;
}
level_id = (u8)dm->trx_para_level;
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index bc2994865372..5ab95250755d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -171,7 +171,7 @@ bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitr
return true;
}
-static struct ieee80211_supported_band rtw89_sband_2ghz = {
+static const struct ieee80211_supported_band rtw89_sband_2ghz = {
.band = NL80211_BAND_2GHZ,
.channels = rtw89_channels_2ghz,
.n_channels = ARRAY_SIZE(rtw89_channels_2ghz),
@@ -181,7 +181,7 @@ static struct ieee80211_supported_band rtw89_sband_2ghz = {
.vht_cap = {0},
};
-static struct ieee80211_supported_band rtw89_sband_5ghz = {
+static const struct ieee80211_supported_band rtw89_sband_5ghz = {
.band = NL80211_BAND_5GHZ,
.channels = rtw89_channels_5ghz,
.n_channels = ARRAY_SIZE(rtw89_channels_5ghz),
@@ -193,7 +193,7 @@ static struct ieee80211_supported_band rtw89_sband_5ghz = {
.vht_cap = {0},
};
-static struct ieee80211_supported_band rtw89_sband_6ghz = {
+static const struct ieee80211_supported_band rtw89_sband_6ghz = {
.band = NL80211_BAND_6GHZ,
.channels = rtw89_channels_6ghz,
.n_channels = ARRAY_SIZE(rtw89_channels_6ghz),
@@ -1196,7 +1196,11 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u8 *addr,
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
/* sign conversion for S(12,2) */
- cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_CFO(addr), 11);
+ if (rtwdev->chip->cfo_src_fd)
+ cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_FD_CFO(addr), 11);
+ else
+ cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_PREMB_CFO(addr), 11);
+
rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
}
@@ -1255,6 +1259,9 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
if (phy_ppdu->ie < RTW89_CCK_PKT)
return -EINVAL;
+ if (!phy_ppdu->to_self)
+ return 0;
+
pos = (u8 *)phy_ppdu->buf + PHY_STS_HDR_LEN;
end = (u8 *)phy_ppdu->buf + phy_ppdu->len;
while (pos < end) {
@@ -1398,6 +1405,9 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
const u8 *bssid = iter_data->bssid;
+ if (!vif->bss_conf.bssid)
+ return;
+
if (ieee80211_is_trigger(hdr->frame_control)) {
rtw89_stats_trigger_frame(rtwdev, vif, skb);
return;
@@ -1470,6 +1480,27 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
rx_status->rate_idx -= 4;
}
+static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
+{
+ static const struct ieee80211_radiotap_he known_he = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_radiotap_he *he;
+
+ if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR))
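+ /* Radiotap HE info is only added while a monitor interface is active. */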
+ return;
+
+ if (rx_status->encoding == RX_ENC_HE) {
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+ he = skb_push(skb, sizeof(*he));
+ *he = known_he;
+ }
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -1484,6 +1515,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
@@ -2201,6 +2233,9 @@ static void rtw89_track_work(struct work_struct *work)
track_work.work);
bool tfc_changed;
+ if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags))
+ return;
+
mutex_lock(&rtwdev->mutex);
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
@@ -2227,6 +2262,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_phy_ra_update(rtwdev);
rtw89_phy_cfo_track(rtwdev);
rtw89_phy_tx_path_div_track(rtwdev);
+ rtw89_phy_ul_tb_ctrl_track(rtwdev);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -2375,6 +2411,8 @@ void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
rtwvif->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
break;
+ case NL80211_IFTYPE_MONITOR:
+ break;
default:
WARN_ON(1);
break;
@@ -2410,6 +2448,8 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
RTW89_MAX_MAC_ID_NUM);
+ if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
+ return -ENOSPC;
}
return 0;
@@ -2527,7 +2567,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
}
/* update cam aid mac_id net_type */
- rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cam\n");
return ret;
@@ -2548,6 +2588,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
+ rtw89_phy_ul_tb_assoc(rtwdev, rtwvif);
}
return ret;
@@ -2933,6 +2974,41 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
mutex_unlock(&rtwdev->mutex);
}
+int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
+{
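+ /*
+ * Claim the wait slot by swapping cond from IDLE to the requested value;
+ * rtw89_complete_cond() swaps it back to IDLE when the result arrives.
+ */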
+ struct completion *cmpl = &wait->completion;
+ unsigned long timeout;
+ unsigned int cur;
+
+ cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
+ if (cur != RTW89_WAIT_COND_IDLE)
+ return -EBUSY;
+
+ timeout = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
+ if (timeout == 0) {
+ atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
+ return -ETIMEDOUT;
+ }
+
+ if (wait->data.err)
+ return -EFAULT;
+
+ return 0;
+}
+
+void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
+ const struct rtw89_completion_data *data)
+{
+ unsigned int cur;
+
+ cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
+ if (cur != cond)
+ return;
+
+ wait->data = *data;
+ complete(&wait->completion);
+}
+
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
@@ -2957,7 +3033,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
return ret;
rtw89_phy_init_bb_reg(rtwdev);
- rtw89_phy_init_rf_reg(rtwdev);
+ rtw89_phy_init_rf_reg(rtwdev, false);
rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);
@@ -3037,6 +3113,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
continue;
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
+ INIT_LIST_HEAD(&rtwdev->wow.pkt_list);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -3053,6 +3130,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
mutex_init(&rtwdev->rf_mutex);
rtwdev->total_sta_assoc = 0;
+ rtw89_init_wait(&rtwdev->mcc.wait);
+
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
skb_queue_head_init(&rtwdev->c2h_queue);
@@ -3251,6 +3330,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -3268,6 +3348,10 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
+#ifdef CONFIG_PM
+ hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
+#endif
+
hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index db041b32a8c2..2badb96d2ae3 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -35,6 +35,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
#define RTW89_TX_DIV_RSSI_RAW_TH (2 << RSSI_FACTOR)
+#define RTW89_RADIOTAP_ROOM ALIGN(sizeof(struct ieee80211_radiotap_he), 64)
#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
#define RTW89_HTC_VARIANT_HE 3
@@ -84,6 +85,7 @@ enum rtw89_subband {
RTW89_CH_6G_BAND_IDX7, /* Ultra-high */
RTW89_SUBBAND_NR,
+ RTW89_SUBBAND_2GHZ_5GHZ_NR = RTW89_CH_5G_BAND_4 + 1,
};
enum rtw89_gain_offset {
@@ -177,7 +179,9 @@ enum rtw89_upd_mode {
RTW89_ROLE_REMOVE,
RTW89_ROLE_TYPE_CHANGE,
RTW89_ROLE_INFO_CHANGE,
- RTW89_ROLE_CON_DISCONN
+ RTW89_ROLE_CON_DISCONN,
+ RTW89_ROLE_BAND_SW,
+ RTW89_ROLE_FW_RESTORE,
};
enum rtw89_self_role {
@@ -476,6 +480,20 @@ enum rtw89_regulation_type {
RTW89_REGD_NUM,
};
+enum rtw89_fw_pkt_ofld_type {
+ RTW89_PKT_OFLD_TYPE_PROBE_RSP = 0,
+ RTW89_PKT_OFLD_TYPE_PS_POLL = 1,
+ RTW89_PKT_OFLD_TYPE_NULL_DATA = 2,
+ RTW89_PKT_OFLD_TYPE_QOS_NULL = 3,
+ RTW89_PKT_OFLD_TYPE_CTS2SELF = 4,
+ RTW89_PKT_OFLD_TYPE_ARP_RSP = 5,
+ RTW89_PKT_OFLD_TYPE_NDP = 6,
+ RTW89_PKT_OFLD_TYPE_EAPOL_KEY = 7,
+ RTW89_PKT_OFLD_TYPE_SA_QUERY = 8,
+ RTW89_PKT_OFLD_TYPE_PROBE_REQ = 12,
+ RTW89_PKT_OFLD_TYPE_NUM,
+};
+
struct rtw89_txpwr_byrate {
s8 cck[RTW89_RATE_CCK_MAX];
s8 ofdm[RTW89_RATE_OFDM_MAX];
@@ -490,6 +508,8 @@ enum rtw89_bandwidth_section_num {
RTW89_BW80_SEC_NUM = 2,
};
+#define RTW89_TXPWR_LMT_PAGE_SIZE 40
+
struct rtw89_txpwr_limit {
s8 cck_20m[RTW89_BF_NUM];
s8 cck_40m[RTW89_BF_NUM];
@@ -504,6 +524,8 @@ struct rtw89_txpwr_limit {
#define RTW89_RU_SEC_NUM 8
+#define RTW89_TXPWR_LMT_RU_PAGE_SIZE 24
+
struct rtw89_txpwr_limit_ru {
s8 ru26[RTW89_RU_SEC_NUM];
s8 ru52[RTW89_RU_SEC_NUM];
@@ -631,6 +653,13 @@ enum rtw89_sc_offset {
RTW89_SC_40_LOWER = 10,
};
+enum rtw89_wow_flags {
+ RTW89_WOW_FLAG_EN_MAGIC_PKT,
+ RTW89_WOW_FLAG_EN_REKEY_PKT,
+ RTW89_WOW_FLAG_EN_DISCONNECT,
+ RTW89_WOW_FLAG_NUM,
+};
+
struct rtw89_chan {
u8 channel;
u8 primary_channel;
@@ -2192,6 +2221,7 @@ struct rtw89_sta {
struct rtw89_efuse {
bool valid;
+ bool power_k_valid;
u8 xtal_cap;
u8 addr[ETH_ALEN];
u8 rfe_type;
@@ -2210,6 +2240,8 @@ struct rtw89_phy_rate_pattern {
struct rtw89_vif {
struct list_head list;
struct rtw89_dev *rtwdev;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+
u8 mac_id;
u8 port;
u8 mac_addr[ETH_ALEN];
@@ -2232,6 +2264,8 @@ struct rtw89_vif {
bool wowlan_magic;
bool is_hesta;
bool last_a_ctrl;
+ bool dyn_tb_bedge_en;
+ u8 def_tri_idx;
struct work_struct update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
@@ -2280,6 +2314,16 @@ struct rtw89_hci_ops {
*/
void (*recovery_start)(struct rtw89_dev *rtwdev);
void (*recovery_complete)(struct rtw89_dev *rtwdev);
+
+ void (*ctrl_txdma_ch)(struct rtw89_dev *rtwdev, bool enable);
+ void (*ctrl_txdma_fw_ch)(struct rtw89_dev *rtwdev, bool enable);
+ void (*ctrl_trxhci)(struct rtw89_dev *rtwdev, bool enable);
+ int (*poll_txdma_ch)(struct rtw89_dev *rtwdev);
+ void (*clr_idx_all)(struct rtw89_dev *rtwdev);
+ void (*clear)(struct rtw89_dev *rtwdev, struct pci_dev *pdev);
+ void (*disable_intr)(struct rtw89_dev *rtwdev);
+ void (*enable_intr)(struct rtw89_dev *rtwdev);
+ int (*rst_bdram)(struct rtw89_dev *rtwdev);
};
struct rtw89_hci_info {
@@ -2357,7 +2401,6 @@ struct rtw89_chip_ops {
void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state);
void (*btc_set_wl_txpwr_ctrl)(struct rtw89_dev *rtwdev, u32 txpwr_val);
s8 (*btc_get_bt_rssi)(struct rtw89_dev *rtwdev, s8 val);
- void (*btc_bt_aci_imp)(struct rtw89_dev *rtwdev);
void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev);
void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state);
void (*btc_set_policy)(struct rtw89_dev *rtwdev, u16 policy_type);
@@ -2384,6 +2427,7 @@ enum rtw89_dma_ch {
enum rtw89_qta_mode {
RTW89_QTA_SCC,
RTW89_QTA_DLFW,
+ RTW89_QTA_WOW,
/* keep last */
RTW89_QTA_INVALID,
@@ -2607,6 +2651,11 @@ struct rtw89_dig_regs {
struct rtw89_reg_def p1_s20_pagcugc_en;
};
+struct rtw89_phy_ul_tb_info {
+ bool dyn_tb_tri_en;
+ u8 def_if_bandedge;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
@@ -2618,10 +2667,13 @@ struct rtw89_chip_info {
u32 rsvd_ple_ofst;
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
+ u8 wde_qempty_acq_num;
+ u8 wde_qempty_mgq_sel;
u32 rf_base_addr[2];
u8 support_chanctx_num;
u8 support_bands;
bool support_bw160;
+ bool support_ul_tb_ctrl;
bool hw_sec_hdr;
u8 rf_path_num;
u8 tx_nss;
@@ -2714,11 +2766,13 @@ struct rtw89_chip_info {
u32 c2h_ctrl_reg;
const u32 *c2h_regs;
const struct rtw89_page_regs *page_regs;
+ bool cfo_src_fd;
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
const struct rtw89_imr_info *imr_info;
const struct rtw89_rrsr_cfgs *rrsr_cfgs;
u32 dma_ch_mask;
+ const struct wiphy_wowlan_support *wowlan_stub;
};
union rtw89_bus_info {
@@ -2760,6 +2814,28 @@ struct rtw89_mac_info {
u8 cpwm_seq_num;
};
+#define RTW89_COMPLETION_BUF_SIZE 24
+#define RTW89_WAIT_COND_IDLE UINT_MAX
+
+struct rtw89_completion_data {
+ bool err;
+ u8 buf[RTW89_COMPLETION_BUF_SIZE];
+};
+
+struct rtw89_wait_info {
+ atomic_t cond;
+ struct completion completion;
+ struct rtw89_completion_data data;
+};
+
+#define RTW89_WAIT_FOR_COND_TIMEOUT msecs_to_jiffies(100)
+
+static inline void rtw89_init_wait(struct rtw89_wait_info *wait)
+{
+ init_completion(&wait->completion);
+ atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
+}
+
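The wait helpers above pair a producer (typically a C2H handler calling rtw89_complete_cond(), whose tail is visible in the core.c hunk earlier) with a consumer that blocks on the completion. A minimal waiter sketch built from these fields, assuming cmpxchg-based ownership of the cond slot; this is illustrative, not the patch's exact implementation:

/* Hypothetical waiter: claim the cond slot, wait with the timeout defined
 * above, and release the slot again on timeout. Error codes are assumed.
 */
static inline int rtw89_wait_for_cond_example(struct rtw89_wait_info *wait,
					      unsigned int cond)
{
	unsigned long time_left;

	if (atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond) !=
	    RTW89_WAIT_COND_IDLE)
		return -EBUSY;

	time_left = wait_for_completion_timeout(&wait->completion,
						RTW89_WAIT_FOR_COND_TIMEOUT);
	if (!time_left) {
		atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
		return -ETIMEDOUT;
	}

	return wait->data.err ? -EFAULT : 0;
}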
enum rtw89_fw_type {
RTW89_FW_NORMAL = 1,
RTW89_FW_WOWLAN = 3,
@@ -2879,6 +2955,13 @@ enum rtw89_entity_mode {
RTW89_ENTITY_MODE_SCC,
};
+struct rtw89_sub_entity {
+ struct cfg80211_chan_def chandef;
+ struct rtw89_chan chan;
+ struct rtw89_chan_rcd rcd;
+ struct rtw89_chanctx_cfg *cfg;
+};
+
struct rtw89_hal {
u32 rx_fltr;
u8 cv;
@@ -2892,13 +2975,10 @@ struct rtw89_hal {
bool support_igi;
DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY);
- struct cfg80211_chan_def chandef[NUM_OF_RTW89_SUB_ENTITY];
+ struct rtw89_sub_entity sub[NUM_OF_RTW89_SUB_ENTITY];
bool entity_active;
enum rtw89_entity_mode entity_mode;
-
- struct rtw89_chan chan[NUM_OF_RTW89_SUB_ENTITY];
- struct rtw89_chan_rcd chan_rcd[NUM_OF_RTW89_SUB_ENTITY];
};
#define RTW89_MAX_MAC_ID_NUM 128
@@ -2915,6 +2995,9 @@ enum rtw89_flags {
RTW89_FLAG_LOW_POWER_MODE,
RTW89_FLAG_INACTIVE_PS,
RTW89_FLAG_CRASH_SIMULATING,
+ RTW89_FLAG_WOWLAN,
+ RTW89_FLAG_FORBIDDEN_TRACK_WROK,
+ RTW89_FLAG_CHANGING_INTERFACE,
NUM_OF_RTW89_FLAGS,
};
@@ -2943,6 +3026,7 @@ struct rtw89_pkt_drop_params {
u8 port;
u8 mbssid;
bool tf_trs;
+ u32 macid_band_sel[4];
};
struct rtw89_pkt_stat {
@@ -2976,7 +3060,7 @@ struct rtw89_dack_info {
#define RTW89_IQK_CHS_NR 2
#define RTW89_IQK_PATH_NR 4
-struct rtw89_mcc_info {
+struct rtw89_rfk_mcc_info {
u8 ch[RTW89_IQK_CHS_NR];
u8 band[RTW89_IQK_CHS_NR];
u8 table_idx;
@@ -3044,6 +3128,7 @@ struct rtw89_dpk_bkup_para {
struct rtw89_dpk_info {
bool is_dpk_enable;
bool is_dpk_reload_en;
+ u8 dpk_gs[RTW89_PHY_MAX];
u16 dc_i[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u16 dc_q[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u8 corr_val[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
@@ -3159,6 +3244,14 @@ struct rtw89_cfo_tracking_info {
u8 lock_cnt;
};
+enum rtw89_tssi_alimk_band {
+ TSSI_ALIMK_2G = 0,
+ TSSI_ALIMK_5GL,
+ TSSI_ALIMK_5GM,
+ TSSI_ALIMK_5GH,
+ TSSI_ALIMK_MAX
+};
+
/* 2GL, 2GH, 5GL1, 5GH1, 5GM1, 5GM2, 5GH1, 5GH2 */
#define TSSI_TRIM_CH_GROUP_NUM 8
#define TSSI_TRIM_CH_GROUP_NUM_6G 16
@@ -3169,6 +3262,8 @@ struct rtw89_cfo_tracking_info {
#define TSSI_MCS_6G_CH_GROUP_NUM 32
#define TSSI_MCS_CH_GROUP_NUM \
(TSSI_MCS_2G_CH_GROUP_NUM + TSSI_MCS_5G_CH_GROUP_NUM)
+#define TSSI_MAX_CH_NUM 67
+#define TSSI_ALIMK_VALUE_NUM 8
struct rtw89_tssi_info {
u8 thermal[RF_PATH_MAX];
@@ -3181,6 +3276,11 @@ struct rtw89_tssi_info {
bool tssi_tracking_check[RF_PATH_MAX];
u8 default_txagc_offset[RF_PATH_MAX];
u32 base_thermal[RF_PATH_MAX];
+ bool check_backup_aligmk[RF_PATH_MAX][TSSI_MAX_CH_NUM];
+ u32 alignment_backup_by_ch[RF_PATH_MAX][TSSI_MAX_CH_NUM][TSSI_ALIMK_VALUE_NUM];
+ u32 alignment_value[RF_PATH_MAX][TSSI_ALIMK_MAX][TSSI_ALIMK_VALUE_NUM];
+ bool alignment_done[RF_PATH_MAX][TSSI_ALIMK_MAX];
+ u32 tssi_alimk_time;
};
struct rtw89_power_trim_info {
@@ -3421,8 +3521,40 @@ struct rtw89_phy_bb_gain_info {
struct rtw89_phy_efuse_gain {
bool offset_valid;
+ bool comp_valid;
s8 offset[RF_PATH_MAX][RTW89_GAIN_OFFSET_NR]; /* S(8, 0) */
s8 offset_base[RTW89_PHY_MAX]; /* S(8, 4) */
+ s8 rssi_base[RTW89_PHY_MAX]; /* S(8, 4) */
+ s8 comp[RF_PATH_MAX][RTW89_SUBBAND_NR]; /* S(8, 0) */
+};
+
+#define RTW89_MAX_PATTERN_NUM 18
+#define RTW89_MAX_PATTERN_MASK_SIZE 4
+#define RTW89_MAX_PATTERN_SIZE 128
+
+struct rtw89_wow_cam_info {
+ bool r_w;
+ u8 idx;
+ u32 mask[RTW89_MAX_PATTERN_MASK_SIZE];
+ u16 crc;
+ bool negative_pattern_match;
+ bool skip_mac_hdr;
+ bool uc;
+ bool mc;
+ bool bc;
+ bool valid;
+};
+
+struct rtw89_wow_param {
+ struct ieee80211_vif *wow_vif;
+ DECLARE_BITMAP(flags, RTW89_WOW_FLAG_NUM);
+ struct rtw89_wow_cam_info patterns[RTW89_MAX_PATTERN_NUM];
+ u8 pattern_cnt;
+ struct list_head pkt_list;
+};
+
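The flags bitmap and pattern array above are what the suspend path fills from cfg80211's WoWLAN configuration. A small hedged sketch of that mapping (the cfg80211_wowlan field names are real; the helper itself is illustrative):

/* Hypothetical parser: translate cfg80211 triggers into the wow flags. */
static void rtw89_wow_parse_flags_example(struct rtw89_wow_param *rtw_wow,
					  const struct cfg80211_wowlan *wowlan)
{
	if (wowlan->magic_pkt)
		set_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags);
	if (wowlan->disconnect)
		set_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags);
	if (wowlan->gtk_rekey_failure)
		set_bit(RTW89_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags);
}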
+struct rtw89_mcc_info {
+ struct rtw89_wait_info wait;
};
struct rtw89_dev {
@@ -3435,6 +3567,7 @@ struct rtw89_dev {
const struct rtw89_chip_info *chip;
const struct rtw89_pci_info *pci_info;
struct rtw89_hal hal;
+ struct rtw89_mcc_info mcc;
struct rtw89_mac_info mac;
struct rtw89_fw_info fw;
struct rtw89_hci_info hci;
@@ -3478,7 +3611,7 @@ struct rtw89_dev {
struct rtw89_dack_info dack;
struct rtw89_iqk_info iqk;
struct rtw89_dpk_info dpk;
- struct rtw89_mcc_info mcc;
+ struct rtw89_rfk_mcc_info rfk_mcc;
struct rtw89_lck_info lck;
struct rtw89_rx_dck_info rx_dck;
bool is_tssi_mode[RF_PATH_MAX];
@@ -3495,6 +3628,7 @@ struct rtw89_dev {
struct rtw89_phy_ch_info ch_info;
struct rtw89_phy_bb_gain_info bb_gain;
struct rtw89_phy_efuse_gain efuse_gain;
+ struct rtw89_phy_ul_tb_info ul_tb_info;
struct delayed_work track_work;
struct delayed_work coex_act1_work;
@@ -3513,6 +3647,8 @@ struct rtw89_dev {
enum rtw89_ps_mode ps_mode;
bool lps_enabled;
+ struct rtw89_wow_param wow;
+
/* napi structure */
struct net_device netdev;
struct napi_struct napi;
@@ -3595,6 +3731,66 @@ static inline void rtw89_hci_recovery_complete(struct rtw89_dev *rtwdev)
rtwdev->hci.ops->recovery_complete(rtwdev);
}
+static inline void rtw89_hci_enable_intr(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.ops->enable_intr)
+ rtwdev->hci.ops->enable_intr(rtwdev);
+}
+
+static inline void rtw89_hci_disable_intr(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.ops->disable_intr)
+ rtwdev->hci.ops->disable_intr(rtwdev);
+}
+
+static inline void rtw89_hci_ctrl_txdma_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (rtwdev->hci.ops->ctrl_txdma_ch)
+ rtwdev->hci.ops->ctrl_txdma_ch(rtwdev, enable);
+}
+
+static inline void rtw89_hci_ctrl_txdma_fw_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (rtwdev->hci.ops->ctrl_txdma_fw_ch)
+ rtwdev->hci.ops->ctrl_txdma_fw_ch(rtwdev, enable);
+}
+
+static inline void rtw89_hci_ctrl_trxhci(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (rtwdev->hci.ops->ctrl_trxhci)
+ rtwdev->hci.ops->ctrl_trxhci(rtwdev, enable);
+}
+
+static inline int rtw89_hci_poll_txdma_ch(struct rtw89_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtwdev->hci.ops->poll_txdma_ch)
+ ret = rtwdev->hci.ops->poll_txdma_ch(rtwdev);
+ return ret;
+}
+
+static inline void rtw89_hci_clr_idx_all(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.ops->clr_idx_all)
+ rtwdev->hci.ops->clr_idx_all(rtwdev);
+}
+
+static inline int rtw89_hci_rst_bdram(struct rtw89_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtwdev->hci.ops->rst_bdram)
+ ret = rtwdev->hci.ops->rst_bdram(rtwdev);
+ return ret;
+}
+
+static inline void rtw89_hci_clear(struct rtw89_dev *rtwdev, struct pci_dev *pdev)
+{
+ if (rtwdev->hci.ops->clear)
+ rtwdev->hci.ops->clear(rtwdev, pdev);
+}
+
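All of the new HCI hooks are optional, so the wrappers above (and below) guard against a NULL op. A hedged sketch of the kind of quiesce sequence a suspend/WoWLAN path could compose from them; the ordering is illustrative, not taken from this patch:

/* Hypothetical quiesce helper built only from the optional wrappers. */
static int rtw89_hci_quiesce_example(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_hci_disable_intr(rtwdev);
	rtw89_hci_ctrl_trxhci(rtwdev, false);
	rtw89_hci_ctrl_txdma_ch(rtwdev, false);

	ret = rtw89_hci_poll_txdma_ch(rtwdev);
	if (ret)
		return ret;

	rtw89_hci_clr_idx_all(rtwdev);
	return rtw89_hci_rst_bdram(rtwdev);
}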
static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
{
return rtwdev->hci.ops->read8(rtwdev, addr);
@@ -3948,7 +4144,7 @@ const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
- return &hal->chandef[idx];
+ return &hal->sub[idx].chandef;
}
static inline
@@ -3957,7 +4153,7 @@ const struct rtw89_chan *rtw89_chan_get(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
- return &hal->chan[idx];
+ return &hal->sub[idx].chan;
}
static inline
@@ -3966,7 +4162,7 @@ const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
- return &hal->chan_rcd[idx];
+ return &hal->sub[idx].rcd;
}
static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev)
@@ -4221,6 +4417,23 @@ static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev,
return &fw_info->normal;
}
+static inline struct sk_buff *rtw89_alloc_skb_for_rx(struct rtw89_dev *rtwdev,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) {
+ skb = dev_alloc_skb(length + RTW89_RADIOTAP_ROOM);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, RTW89_RADIOTAP_ROOM);
+ return skb;
+ }
+
+ return dev_alloc_skb(length);
+}
+
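rtw89_alloc_skb_for_rx() only reserves radiotap headroom when the hardware is configured for monitoring (which the new WANT_MONITOR_VIF flag makes reachable). A hedged sketch of an RX delivery path consuming it; the helper name is hypothetical:

static void rtw89_core_rx_deliver_example(struct rtw89_dev *rtwdev,
					  const void *data, unsigned int len)
{
	struct sk_buff *skb = rtw89_alloc_skb_for_rx(rtwdev, len);

	if (!skb)
		return;

	skb_put_data(skb, data, len);
	/* a radiotap header, if built, is pushed into the reserved headroom */
	ieee80211_rx_irqsafe(rtwdev->hw, skb);
}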
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -4289,6 +4502,9 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev,
void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
+int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
+void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
+ const struct rtw89_completion_data *data);
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
void rtw89_core_update_beacon_work(struct work_struct *work);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 730e83d54257..8297e35bfa52 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -8,6 +8,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "pci.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
@@ -51,6 +52,22 @@ struct rtw89_debugfs_priv {
};
};
+static const u16 rtw89_rate_info_bw_to_mhz_map[] = {
+ [RATE_INFO_BW_20] = 20,
+ [RATE_INFO_BW_40] = 40,
+ [RATE_INFO_BW_80] = 80,
+ [RATE_INFO_BW_160] = 160,
+ [RATE_INFO_BW_320] = 320,
+};
+
+static u16 rtw89_rate_info_bw_to_mhz(enum rate_info_bw bw)
+{
+ if (bw < ARRAY_SIZE(rtw89_rate_info_bw_to_mhz_map))
+ return rtw89_rate_info_bw_to_mhz_map[bw];
+
+ return 0;
+}
+
static int rtw89_debugfs_single_show(struct seq_file *m, void *v)
{
struct rtw89_debugfs_priv *debugfs_priv = m->private;
@@ -464,7 +481,7 @@ static const struct txpwr_map __txpwr_map_lmt_ru = {
};
static u8 __print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent,
- const u8 *buf, const u8 cur)
+ const s8 *buf, const u8 cur)
{
char *fmt;
@@ -493,8 +510,9 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
const struct txpwr_map *map)
{
u8 fct = rtwdev->chip->txpwr_factor_mac;
- u8 *buf, cur, i;
u32 val, addr;
+ s8 *buf, tmp;
+ u8 cur, i;
int ret;
buf = vzalloc(map->addr_to - map->addr_from + 4);
@@ -507,8 +525,11 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
val = MASKDWORD;
cur = addr - map->addr_from;
- for (i = 0; i < 4; i++, val >>= 8)
- buf[cur + i] = FIELD_GET(MASKBYTE0, val) >> fct;
+ for (i = 0; i < 4; i++, val >>= 8) {
+ /* signed 7 bits, with BIT(7) reserved */
+ tmp = sign_extend32(val, 6);
+ buf[cur + i] = tmp >> fct;
+ }
}
for (cur = 0, i = 0; i < map->size; i++)
@@ -770,13 +791,34 @@ rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v)
{
struct rtw89_debugfs_priv *debugfs_priv = m->private;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ bool grant_read = false;
+
+ if (debugfs_priv->mac_mem.sel >= RTW89_MAC_MEM_NUM)
+ return -ENOENT;
+
+ if (rtwdev->chip->chip_id == RTL8852C) {
+ switch (debugfs_priv->mac_mem.sel) {
+ case RTW89_MAC_MEM_TXD_FIFO_0_V1:
+ case RTW89_MAC_MEM_TXD_FIFO_1_V1:
+ case RTW89_MAC_MEM_TXDATA_FIFO_0:
+ case RTW89_MAC_MEM_TXDATA_FIFO_1:
+ grant_read = true;
+ break;
+ default:
+ break;
+ }
+ }
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
+ if (grant_read)
+ rtw89_write32_set(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO);
rtw89_debug_dump_mac_mem(m, rtwdev,
debugfs_priv->mac_mem.sel,
debugfs_priv->mac_mem.start,
debugfs_priv->mac_mem.len);
+ if (grant_read)
+ rtw89_write32_clr(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO);
mutex_unlock(&rtwdev->mutex);
return 0;
@@ -947,7 +989,9 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
struct seq_file *m)
{
- int ret;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 dmac_err;
+ int i, ret;
ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
if (ret) {
@@ -955,98 +999,347 @@ static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
return ret;
}
- seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR));
- seq_printf(m, "[0]R_AX_WDRLS_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
- seq_printf(m, "[1]R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ERR_IMR_ISR));
- seq_printf(m, "[2.1]R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
- seq_printf(m, "[2.2]R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
- seq_printf(m, "[3]R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
- seq_printf(m, "[4]R_AX_WDE_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
- seq_printf(m, "[5.1]R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
- seq_printf(m, "[5.2]R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
- seq_printf(m, "[6]R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
- seq_printf(m, "[7]R_AX_PKTIN_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
- seq_printf(m, "[8.1]R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
- seq_printf(m, "[8.2]R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
- seq_printf(m, "[8.3]R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
- seq_printf(m, "[10]R_AX_CPUIO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR));
- seq_printf(m, "[11.1]R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
- seq_printf(m, "[11.2]R_AX_BBRPT_CHINFO_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR_ISR));
- seq_printf(m, "[11.3]R_AX_BBRPT_DFS_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR_ISR));
- seq_printf(m, "[11.4]R_AX_LA_ERRFLAG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_LA_ERRFLAG));
+ dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
+ seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
+ seq_printf(m, "R_AX_DMAC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));
+
+ if (dmac_err) {
+ seq_printf(m, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
+ seq_printf(m, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
+ seq_printf(m, "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
+ seq_printf(m, "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
+ seq_printf(m, "R_AX_PLE_DBGERR_STS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
+ }
+ }
+
+ if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
+ seq_printf(m, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
+ seq_printf(m, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
+ if (chip->chip_id == RTL8852C)
+ seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1));
+ else
+ seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
+ }
+
+ if (dmac_err & B_AX_WSEC_ERR_FLAG) {
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_SEC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR));
+ seq_printf(m, "R_AX_SEC_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG));
+ seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ seq_printf(m, "R_AX_SEC_DEBUG1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG1));
+ seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+
+ rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
+ B_AX_DBG_SEL0, 0x8B);
+ rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
+ B_AX_DBG_SEL1, 0x8B);
+ rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1,
+ B_AX_SEL_0XC0_MASK, 1);
+ for (i = 0; i < 0x10; i++) {
+ rtw89_write32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
+ B_AX_SEC_DBG_PORT_FIELD_MASK, i);
+ seq_printf(m, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
+ i, rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
+ }
+ } else {
+ seq_printf(m, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
+ seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ seq_printf(m, "R_AX_SEC_CAM_WDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
+ seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+ seq_printf(m, "R_AX_SEC_TRX_PKT_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
+ seq_printf(m, "R_AX_SEC_TRX_BLK_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
+ }
+ }
+
+ if (dmac_err & B_AX_MPDU_ERR_FLAG) {
+ seq_printf(m, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
+ seq_printf(m, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
+ seq_printf(m, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
+ seq_printf(m, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
+ seq_printf(m, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
+ seq_printf(m, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
+ seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ }
+
+ if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
+ seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR));
+ seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR));
+ seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR));
+ } else {
+ seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
+ seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
+ }
+ }
+
+ if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) {
+ seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ seq_printf(m, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
+ seq_printf(m, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
+ seq_printf(m, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
+ seq_printf(m, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
+ seq_printf(m, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
+ seq_printf(m, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
+ seq_printf(m, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
+ seq_printf(m, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_RX_CTRL0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL0));
+ seq_printf(m, "R_AX_RX_CTRL1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL1));
+ seq_printf(m, "R_AX_RX_CTRL2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL2));
+ } else {
+ seq_printf(m, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
+ seq_printf(m, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
+ seq_printf(m, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
+ }
+ }
+
+ if (dmac_err & B_AX_PKTIN_ERR_FLAG) {
+ seq_printf(m, "R_AX_PKTIN_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
+ seq_printf(m, "R_AX_PKTIN_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_DISPATCH_ERR_FLAG) {
+ seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
+ seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
+ seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
+ seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
+ seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
+ seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_BBRPT_ERR_FLAG) {
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
+ seq_printf(m, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR));
+ seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ } else {
+ seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
+ seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ }
+ }
+
+ if (dmac_err & B_AX_HAXIDMA_ERR_FLAG && chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
+ seq_printf(m, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
+ }
return 0;
}
-static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+static int rtw89_debug_mac_dump_cmac_err(struct rtw89_dev *rtwdev,
+ struct seq_file *m,
+ enum rtw89_mac_idx band)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 offset = 0;
+ u32 cmac_err;
int ret;
- ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL);
+ ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL);
if (ret) {
- seq_puts(m, "[CMAC] : CMAC 0 not enabled\n");
+ if (band)
+ seq_puts(m, "[CMAC] : CMAC1 not enabled\n");
+ else
+ seq_puts(m, "[CMAC] : CMAC0 not enabled\n");
return ret;
}
- seq_printf(m, "R_AX_CMAC_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR));
- seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR));
- seq_printf(m, "[1]R_AX_PTCL_ISR0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PTCL_ISR0));
- seq_printf(m, "[3]R_AX_DLE_CTRL=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DLE_CTRL));
- seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR));
- seq_printf(m, "[5]R_AX_TXPWR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPWR_ISR));
- seq_printf(m, "[6]R_AX_RMAC_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR));
- seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR));
-
- ret = rtw89_mac_check_mac_en(rtwdev, 1, RTW89_CMAC_SEL);
- if (ret) {
- seq_puts(m, "[CMAC] : CMAC 1 not enabled\n");
- return ret;
+ if (band)
+ offset = RTW89_MAC_AX_BAND_REG_OFFSET;
+
+ cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset);
+ seq_printf(m, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset));
+ seq_printf(m, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset));
+ seq_printf(m, "R_AX_CK_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CK_EN + offset));
+
+ if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) {
+ seq_printf(m, "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset));
+ seq_printf(m, "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset));
+ }
+
+ if (cmac_err & B_AX_PTCL_TOP_ERR_IND) {
+ seq_printf(m, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset));
+ seq_printf(m, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset));
+ }
+
+ if (cmac_err & B_AX_DMA_TOP_ERR_IND) {
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset));
+ seq_printf(m, "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset));
+ } else {
+ seq_printf(m, "R_AX_DLE_CTRL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset));
+ }
+ }
+
+ if (cmac_err & B_AX_DMA_TOP_ERR_IND || cmac_err & B_AX_WMAC_RX_ERR_IND) {
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset));
+ seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ } else {
+ seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ }
+ }
+
+ if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
+ seq_printf(m, "R_AX_TXPWR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset));
+ seq_printf(m, "R_AX_TXPWR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset));
+ }
+
+ if (cmac_err & B_AX_WMAC_TX_ERR_IND) {
+ if (chip->chip_id == RTL8852C) {
+ seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA + offset));
+ seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA_MASK + offset));
+ } else {
+ seq_printf(m, "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR + offset));
+ }
+ seq_printf(m, "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset));
}
- seq_printf(m, "R_AX_CMAC_ERR_ISR_C1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR_C1));
- seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR_C1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR_C1));
- seq_printf(m, "[1]R_AX_PTCL_ISR0_C1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PTCL_ISR0_C1));
- seq_printf(m, "[3]R_AX_DLE_CTRL_C1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DLE_CTRL_C1));
- seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR_C1=0x%02x\n",
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR_C1));
- seq_printf(m, "[5]R_AX_TXPWR_ISR_C1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPWR_ISR_C1));
- seq_printf(m, "[6]R_AX_RMAC_ERR_ISR_C1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR_C1));
- seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR_C1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR_C1));
+ seq_printf(m, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
+
+ return 0;
+}
+
+static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev,
+ struct seq_file *m)
+{
+ rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_0);
+ if (rtwdev->dbcc_en)
+ rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_1);
return 0;
}
@@ -1073,6 +1366,303 @@ static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c1 = {
.rd_msk = B_AX_PTCL_DBG_INFO_MASK
};
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_tx0_5 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0xD,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_tx6 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x5,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_tx7 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x9,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_tx8 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x3,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_tx9_C = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x1,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_txD = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x0,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_tx0 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0xB,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_tx1 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x4,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_tx3 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x8,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_tx4 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x7,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_tx5_8 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x1,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_tx9 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x3,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_txA_C = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x0,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_rx0 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x8,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_rx1_2 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x0,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_rx3 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x6,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_rx4 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x0,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_hdt_rx5 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DISPATCHER_DBG_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x0,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_rx_p0_0 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x3,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_rx_p0_1 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x6,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_rx_p0_2 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x0,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_cdt_rx_p1 = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x8,
+ .end = 0xE,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_stf_ctrl = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x5,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_addr_ctrl = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x6,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_wde_intf = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0xF,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_ple_intf = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x9,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_dspt_flow_ctrl = {
+ .sel_addr = R_AX_DISPATCHER_DBG_PORT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DISPATCHER_CH_SEL_MASK,
+ .srt = 0x0,
+ .end = 0x3,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
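The dispatcher tables above all describe the same walk: program the selector field from srt to end, then sample the read register for each value. A hedged sketch of that loop (the real dump routine also handles the different selector byte widths and settle delays, omitted here):

static void rtw89_dbg_port_walk_example(struct rtw89_dev *rtwdev,
					struct seq_file *m,
					const struct rtw89_mac_dbg_port_info *info)
{
	u32 sel;

	for (sel = info->srt; sel <= info->end; sel++) {
		/* assumes a 16-bit selector, i.e. sel_byte == 2 */
		rtw89_write16_mask(rtwdev, info->sel_addr, info->sel_msk, sel);
		seq_printf(m, "sel=%x: 0x%08x\n", sel,
			   rtw89_read32_mask(rtwdev, info->rd_addr, info->rd_msk));
	}
}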
static const struct rtw89_mac_dbg_port_info dbg_port_sch_c0 = {
.sel_addr = R_AX_SCH_DBG_SEL,
.sel_byte = 1,
@@ -1483,7 +2073,7 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pktinfo = {
static const struct rtw89_mac_dbg_port_info dbg_port_pcie_txdma = {
.sel_addr = R_AX_PCIE_DBG_CTRL,
.sel_byte = 2,
- .sel_msk = B_AX_DBG_SEL_MASK,
+ .sel_msk = B_AX_PCIE_DBG_SEL_MASK,
.srt = 0x00,
.end = 0x03,
.rd_addr = R_AX_DBG_PORT_SEL,
@@ -1494,7 +2084,7 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_txdma = {
static const struct rtw89_mac_dbg_port_info dbg_port_pcie_rxdma = {
.sel_addr = R_AX_PCIE_DBG_CTRL,
.sel_byte = 2,
- .sel_msk = B_AX_DBG_SEL_MASK,
+ .sel_msk = B_AX_PCIE_DBG_SEL_MASK,
.srt = 0x00,
.end = 0x04,
.rd_addr = R_AX_DBG_PORT_SEL,
@@ -1505,7 +2095,7 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_rxdma = {
static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cvt = {
.sel_addr = R_AX_PCIE_DBG_CTRL,
.sel_byte = 2,
- .sel_msk = B_AX_DBG_SEL_MASK,
+ .sel_msk = B_AX_PCIE_DBG_SEL_MASK,
.srt = 0x00,
.end = 0x01,
.rd_addr = R_AX_DBG_PORT_SEL,
@@ -1516,7 +2106,7 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cvt = {
static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cxpl = {
.sel_addr = R_AX_PCIE_DBG_CTRL,
.sel_byte = 2,
- .sel_msk = B_AX_DBG_SEL_MASK,
+ .sel_msk = B_AX_PCIE_DBG_SEL_MASK,
.srt = 0x00,
.end = 0x05,
.rd_addr = R_AX_DBG_PORT_SEL,
@@ -1527,7 +2117,7 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cxpl = {
static const struct rtw89_mac_dbg_port_info dbg_port_pcie_io = {
.sel_addr = R_AX_PCIE_DBG_CTRL,
.sel_byte = 2,
- .sel_msk = B_AX_DBG_SEL_MASK,
+ .sel_msk = B_AX_PCIE_DBG_SEL_MASK,
.srt = 0x00,
.end = 0x05,
.rd_addr = R_AX_DBG_PORT_SEL,
@@ -1538,7 +2128,7 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_io = {
static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc = {
.sel_addr = R_AX_PCIE_DBG_CTRL,
.sel_byte = 2,
- .sel_msk = B_AX_DBG_SEL_MASK,
+ .sel_msk = B_AX_PCIE_DBG_SEL_MASK,
.srt = 0x00,
.end = 0x06,
.rd_addr = R_AX_DBG_PORT_SEL,
@@ -1562,6 +2152,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
struct rtw89_dev *rtwdev, u32 sel)
{
const struct rtw89_mac_dbg_port_info *info;
+ u32 index;
u32 val32;
u16 val16;
u8 val8;
@@ -1837,6 +2428,235 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
info = &dbg_port_pktinfo;
seq_puts(m, "Enable pktinfo dump.\n");
break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX0:
+ rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
+ B_AX_DBG_SEL0, 0x80);
+ rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1,
+ B_AX_SEL_0XC0_MASK, 1);
+ fallthrough;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX1:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX2:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX3:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX4:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX5:
+ info = &dbg_port_dspt_hdt_tx0_5;
+ index = sel - RTW89_DBG_PORT_SEL_DSPT_HDT_TX0;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 0);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, index);
+ seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index);
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX6:
+ info = &dbg_port_dspt_hdt_tx6;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 0);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 6);
+ seq_puts(m, "Enable Dispatcher hdt tx6 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX7:
+ info = &dbg_port_dspt_hdt_tx7;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 0);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 7);
+ seq_puts(m, "Enable Dispatcher hdt tx7 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX8:
+ info = &dbg_port_dspt_hdt_tx8;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 0);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 8);
+ seq_puts(m, "Enable Dispatcher hdt tx8 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TX9:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TXA:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TXB:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TXC:
+ info = &dbg_port_dspt_hdt_tx9_C;
+ index = sel + 9 - RTW89_DBG_PORT_SEL_DSPT_HDT_TX9;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 0);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, index);
+ seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index);
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_TXD:
+ info = &dbg_port_dspt_hdt_txD;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 0);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 0xD);
+ seq_puts(m, "Enable Dispatcher hdt txD dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX0:
+ info = &dbg_port_dspt_cdt_tx0;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 1);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 0);
+ seq_puts(m, "Enable Dispatcher cdt tx0 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX1:
+ info = &dbg_port_dspt_cdt_tx1;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 1);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 1);
+ seq_puts(m, "Enable Dispatcher cdt tx1 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX3:
+ info = &dbg_port_dspt_cdt_tx3;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 1);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 3);
+ seq_puts(m, "Enable Dispatcher cdt tx3 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX4:
+ info = &dbg_port_dspt_cdt_tx4;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 1);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 4);
+ seq_puts(m, "Enable Dispatcher cdt tx4 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX5:
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX6:
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX7:
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX8:
+ info = &dbg_port_dspt_cdt_tx5_8;
+ index = sel + 5 - RTW89_DBG_PORT_SEL_DSPT_CDT_TX5;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 1);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, index);
+ seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index);
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TX9:
+ info = &dbg_port_dspt_cdt_tx9;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 1);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 9);
+ seq_puts(m, "Enable Dispatcher cdt tx9 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TXA:
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TXB:
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_TXC:
+ info = &dbg_port_dspt_cdt_txA_C;
+ index = sel + 0xA - RTW89_DBG_PORT_SEL_DSPT_CDT_TXA;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 1);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, index);
+ seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index);
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_RX0:
+ info = &dbg_port_dspt_hdt_rx0;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 2);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 0);
+ seq_puts(m, "Enable Dispatcher hdt rx0 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_RX1:
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_RX2:
+ info = &dbg_port_dspt_hdt_rx1_2;
+ index = sel + 1 - RTW89_DBG_PORT_SEL_DSPT_HDT_RX1;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 2);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, index);
+ seq_printf(m, "Enable Dispatcher hdt rx%x dump.\n", index);
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_RX3:
+ info = &dbg_port_dspt_hdt_rx3;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 2);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 3);
+ seq_puts(m, "Enable Dispatcher hdt rx3 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_RX4:
+ info = &dbg_port_dspt_hdt_rx4;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 2);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 4);
+ seq_puts(m, "Enable Dispatcher hdt rx4 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_HDT_RX5:
+ info = &dbg_port_dspt_hdt_rx5;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 2);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 5);
+ seq_puts(m, "Enable Dispatcher hdt rx5 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_0:
+ info = &dbg_port_dspt_cdt_rx_p0_0;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 3);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 0);
+ seq_puts(m, "Enable Dispatcher cdt rx part0 0 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0:
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_1:
+ info = &dbg_port_dspt_cdt_rx_p0_1;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 3);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 1);
+ seq_puts(m, "Enable Dispatcher cdt rx part0 1 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_2:
+ info = &dbg_port_dspt_cdt_rx_p0_2;
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 3);
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_CH_SEL_MASK, 2);
+ seq_puts(m, "Enable Dispatcher cdt rx part0 2 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P1:
+ info = &dbg_port_dspt_cdt_rx_p1;
+ rtw89_write8_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 3);
+ seq_puts(m, "Enable Dispatcher cdt rx part1 dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_STF_CTRL:
+ info = &dbg_port_dspt_stf_ctrl;
+ rtw89_write8_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 4);
+ seq_puts(m, "Enable Dispatcher stf control dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_ADDR_CTRL:
+ info = &dbg_port_dspt_addr_ctrl;
+ rtw89_write8_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 5);
+ seq_puts(m, "Enable Dispatcher addr control dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_WDE_INTF:
+ info = &dbg_port_dspt_wde_intf;
+ rtw89_write8_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 6);
+ seq_puts(m, "Enable Dispatcher wde interface dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_PLE_INTF:
+ info = &dbg_port_dspt_ple_intf;
+ rtw89_write8_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 7);
+ seq_puts(m, "Enable Dispatcher ple interface dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_DSPT_FLOW_CTRL:
+ info = &dbg_port_dspt_flow_ctrl;
+ rtw89_write8_mask(rtwdev, info->sel_addr,
+ B_AX_DISPATCHER_INTN_SEL_MASK, 8);
+ seq_puts(m, "Enable Dispatcher flow control dump.\n");
+ break;
case RTW89_DBG_PORT_SEL_PCIE_TXDMA:
info = &dbg_port_pcie_txdma;
val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
@@ -1889,7 +2709,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
info = &dbg_port_pcie_misc2;
val16 = rtw89_read16(rtwdev, R_AX_PCIE_DBG_CTRL);
val16 = u16_replace_bits(val16, PCIE_MISC2_DBG_SEL,
- B_AX_DBG_SEL_MASK);
+ B_AX_PCIE_DBG_SEL_MASK);
rtw89_write16(rtwdev, R_AX_PCIE_DBG_CTRL, val16);
seq_puts(m, "Enable pcie misc2 dump.\n");
break;
@@ -1915,6 +2735,10 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
sel >= RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG &&
sel <= RTW89_DBG_PORT_SEL_PKTINFO)
return false;
+ if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL) &&
+ sel >= RTW89_DBG_PORT_SEL_DSPT_HDT_TX0 &&
+ sel <= RTW89_DBG_PORT_SEL_DSPT_FLOW_CTRL)
+ return false;
if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL) &&
sel >= RTW89_DBG_PORT_SEL_PTCL_C0 &&
sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C0)
@@ -1985,6 +2809,50 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
case_DBG_SEL(PLE_QUEMGN_QLNKTBL);
case_DBG_SEL(PLE_QUEMGN_QEMPTY);
case_DBG_SEL(PKTINFO);
+ case_DBG_SEL(DSPT_HDT_TX0);
+ case_DBG_SEL(DSPT_HDT_TX1);
+ case_DBG_SEL(DSPT_HDT_TX2);
+ case_DBG_SEL(DSPT_HDT_TX3);
+ case_DBG_SEL(DSPT_HDT_TX4);
+ case_DBG_SEL(DSPT_HDT_TX5);
+ case_DBG_SEL(DSPT_HDT_TX6);
+ case_DBG_SEL(DSPT_HDT_TX7);
+ case_DBG_SEL(DSPT_HDT_TX8);
+ case_DBG_SEL(DSPT_HDT_TX9);
+ case_DBG_SEL(DSPT_HDT_TXA);
+ case_DBG_SEL(DSPT_HDT_TXB);
+ case_DBG_SEL(DSPT_HDT_TXC);
+ case_DBG_SEL(DSPT_HDT_TXD);
+ case_DBG_SEL(DSPT_HDT_TXE);
+ case_DBG_SEL(DSPT_HDT_TXF);
+ case_DBG_SEL(DSPT_CDT_TX0);
+ case_DBG_SEL(DSPT_CDT_TX1);
+ case_DBG_SEL(DSPT_CDT_TX3);
+ case_DBG_SEL(DSPT_CDT_TX4);
+ case_DBG_SEL(DSPT_CDT_TX5);
+ case_DBG_SEL(DSPT_CDT_TX6);
+ case_DBG_SEL(DSPT_CDT_TX7);
+ case_DBG_SEL(DSPT_CDT_TX8);
+ case_DBG_SEL(DSPT_CDT_TX9);
+ case_DBG_SEL(DSPT_CDT_TXA);
+ case_DBG_SEL(DSPT_CDT_TXB);
+ case_DBG_SEL(DSPT_CDT_TXC);
+ case_DBG_SEL(DSPT_HDT_RX0);
+ case_DBG_SEL(DSPT_HDT_RX1);
+ case_DBG_SEL(DSPT_HDT_RX2);
+ case_DBG_SEL(DSPT_HDT_RX3);
+ case_DBG_SEL(DSPT_HDT_RX4);
+ case_DBG_SEL(DSPT_HDT_RX5);
+ case_DBG_SEL(DSPT_CDT_RX_P0);
+ case_DBG_SEL(DSPT_CDT_RX_P0_0);
+ case_DBG_SEL(DSPT_CDT_RX_P0_1);
+ case_DBG_SEL(DSPT_CDT_RX_P0_2);
+ case_DBG_SEL(DSPT_CDT_RX_P1);
+ case_DBG_SEL(DSPT_STF_CTRL);
+ case_DBG_SEL(DSPT_ADDR_CTRL);
+ case_DBG_SEL(DSPT_WDE_INTF);
+ case_DBG_SEL(DSPT_PLE_INTF);
+ case_DBG_SEL(DSPT_FLOW_CTRL);
case_DBG_SEL(PCIE_TXDMA);
case_DBG_SEL(PCIE_RXDMA);
case_DBG_SEL(PCIE_CVT);
@@ -2354,6 +3222,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
else
seq_printf(m, "Legacy %d", rate->legacy);
seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? " FB_G" : "");
+ seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(rate->bw));
seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate);
seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait,
sta->deflink.agg.max_rc_amsdu_len);
@@ -2379,6 +3248,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
he_gi_str[rate->he_gi] : "N/A");
break;
}
+ seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw));
seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
rssi = ewma_rssi_read(&rtwsta->avg_rssi);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index ee243aadde87..d1de5e600836 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -26,6 +26,8 @@ enum rtw89_debug_mask {
RTW89_DBG_HW_SCAN = BIT(15),
RTW89_DBG_SAR = BIT(16),
RTW89_DBG_STATE = BIT(17),
+ RTW89_DBG_WOW = BIT(18),
+ RTW89_DBG_UL_TB = BIT(19),
RTW89_DBG_UNEXP = BIT(31),
};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index d57e3610fb88..7e682709232d 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -11,6 +11,9 @@
#include "phy.h"
#include "reg.h"
+static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb);
+
static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
bool header)
{
@@ -85,15 +88,31 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
{
struct rtw89_fw_hdr_section_info *section_info;
const u8 *fw_end = fw + len;
+ const u8 *fwdynhdr;
const u8 *bin;
+ u32 base_hdr_len;
u32 i;
if (!info)
return -EINVAL;
info->section_num = GET_FW_HDR_SEC_NUM(fw);
- info->hdr_len = RTW89_FW_HDR_SIZE +
- info->section_num * RTW89_FW_SECTION_HDR_SIZE;
+ base_hdr_len = RTW89_FW_HDR_SIZE +
+ info->section_num * RTW89_FW_SECTION_HDR_SIZE;
+ info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw);
+
+ if (info->dynamic_hdr_en) {
+ info->hdr_len = GET_FW_HDR_LEN(fw);
+ info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
+ fwdynhdr = fw + base_hdr_len;
+ if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) {
+ rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
+ return -EINVAL;
+ }
+ } else {
+ info->hdr_len = base_hdr_len;
+ info->dynamic_hdr_len = 0;
+ }
bin = fw + info->hdr_len;
@@ -515,6 +534,11 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
u8 val;
int ret;
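+
+ /* Hold the firmware CPU in reset and re-enable it so it is ready to
+ * accept the download before any part of the image is sent.
+ */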
+ rtw89_mac_disable_cpu(rtwdev);
+ ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
+ if (ret)
+ return ret;
+
if (!fw || !len) {
rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
return -ENOENT;
@@ -534,7 +558,7 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
goto fwdl_err;
}
- ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len);
+ ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len);
if (ret) {
ret = -EBUSY;
goto fwdl_err;
@@ -848,6 +872,56 @@ fail:
return ret;
}
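+/* Build a template frame (PS-Poll, probe response or (QoS) Null) via
+ * mac80211, register it with the firmware packet offload engine and track
+ * it on the WoW packet list; the returned offload id is referenced by the
+ * WoW H2C commands below.
+ */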
+static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ enum rtw89_fw_pkt_ofld_type type,
+ u8 *id)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_pktofld_info *info;
+ struct sk_buff *skb;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ switch (type) {
+ case RTW89_PKT_OFLD_TYPE_PS_POLL:
+ skb = ieee80211_pspoll_get(rtwdev->hw, vif);
+ break;
+ case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
+ skb = ieee80211_proberesp_get(rtwdev->hw, vif);
+ break;
+ case RTW89_PKT_OFLD_TYPE_NULL_DATA:
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
+ break;
+ case RTW89_PKT_OFLD_TYPE_QOS_NULL:
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
+ break;
+ default:
+ goto err;
+ }
+
+ if (!skb)
+ goto err;
+
+ list_add_tail(&info->list, &rtw_wow->pkt_list);
+ ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
+ kfree_skb(skb);
+
+ if (ret)
+ return ret;
+
+ *id = info->id;
+ return 0;
+
+err:
+ kfree(info);
+ return -ENOMEM;
+}
+
#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
@@ -2192,7 +2266,7 @@ fail:
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
struct rtw89_fw_h2c_rf_get_mccch *mccch;
struct sk_buff *skb;
int ret;
@@ -2205,10 +2279,10 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
skb_put(skb, sizeof(*mccch));
mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
- mccch->ch_0 = cpu_to_le32(mcc_info->ch[0]);
- mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
- mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
- mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
+ mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
+ mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
+ mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
+ mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
mccch->current_channel = cpu_to_le32(chan->channel);
mccch->current_band_type = cpu_to_le32(chan->band_type);
@@ -2311,8 +2385,43 @@ void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
mutex_unlock(&rtwdev->mutex);
}
+static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
+{
+ struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
+
+ attr->category = RTW89_GET_C2H_CATEGORY(c2h->data);
+ attr->class = RTW89_GET_C2H_CLASS(c2h->data);
+ attr->func = RTW89_GET_C2H_FUNC(c2h->data);
+ attr->len = RTW89_GET_C2H_LEN(c2h->data);
+}
+
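+/* Some C2H events (e.g. acks that complete a pending H2C wait) must be
+ * consumed immediately in atomic context; everything else is deferred to
+ * the c2h work.
+ */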
+static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h)
+{
+ struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
+ u8 category = attr->category;
+ u8 class = attr->class;
+ u8 func = attr->func;
+
+ switch (category) {
+ default:
+ return false;
+ case RTW89_C2H_CAT_MAC:
+ return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
+ }
+}
+
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
{
+ rtw89_fw_c2h_parse_attr(c2h);
+ if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
+ goto enqueue;
+
+ rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
+ dev_kfree_skb_any(c2h);
+ return;
+
+enqueue:
skb_queue_tail(&rtwdev->c2h_queue, c2h);
ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
}
@@ -2320,10 +2429,11 @@ void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
- u8 category = RTW89_GET_C2H_CATEGORY(skb->data);
- u8 class = RTW89_GET_C2H_CLASS(skb->data);
- u8 func = RTW89_GET_C2H_FUNC(skb->data);
- u16 len = RTW89_GET_C2H_LEN(skb->data);
+ struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
+ u8 category = attr->category;
+ u8 class = attr->class;
+ u8 func = attr->func;
+ u16 len = attr->len;
bool dump = true;
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
@@ -2565,6 +2675,9 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
struct rtw89_mac_chinfo *ch_info)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_pktofld_info *info;
u8 band, probe_count = 0;
@@ -2576,13 +2689,13 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
ch_info->tx_pwr_idx = 0;
ch_info->tx_null = false;
ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
if (ssid_num) {
ch_info->num_pkt = ssid_num;
band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
list_for_each_entry(info, &scan_info->pkt_list[band], list) {
- ch_info->probe_id = info->id;
ch_info->pkt_id[probe_count] = info->id;
if (++probe_count >= ssid_num)
break;
@@ -2591,9 +2704,16 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
rtw89_err(rtwdev, "SSID num differs from list len\n");
}
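+ /* On 6 GHz, a scan for only the wildcard SSID transmits no probe
+ * requests, so the channel period can be trimmed unless the requested
+ * duration is mandatory.
+ */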
+ if (ch_info->ch_band == RTW89_BAND_6G) {
+ if (ssid_num == 1 && req->ssids[0].ssid_len == 0) {
+ ch_info->tx_pkt = false;
+ if (!req->duration_mandatory)
+ ch_info->period -= RTW89_DWELL_TIME;
+ }
+ }
+
switch (chan_type) {
case RTW89_CHAN_OPERATE:
- ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
ch_info->central_ch = scan_info->op_chan;
ch_info->pri_ch = scan_info->op_pri_ch;
ch_info->ch_band = scan_info->op_band;
@@ -2602,8 +2722,9 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
ch_info->num_pkt = 0;
break;
case RTW89_CHAN_DFS:
- ch_info->period = max_t(u8, ch_info->period,
- RTW89_DFS_CHAN_TIME);
+ if (ch_info->ch_band != RTW89_BAND_6G)
+ ch_info->period = max_t(u8, ch_info->period,
+ RTW89_DFS_CHAN_TIME);
ch_info->dwell_time = RTW89_DWELL_TIME;
break;
case RTW89_CHAN_ACTIVE:
@@ -2637,8 +2758,13 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
goto out;
}
- ch_info->period = req->duration_mandatory ?
- req->duration : RTW89_CHANNEL_TIME;
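+ /* 6 GHz channels use a shorter base scan time plus one dwell period. */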
+ if (req->duration_mandatory)
+ ch_info->period = req->duration;
+ else if (channel->band == NL80211_BAND_6GHZ)
+ ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME;
+ else
+ ch_info->period = RTW89_CHANNEL_TIME;
+
ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
ch_info->central_ch = channel->hw_value;
ch_info->pri_ch = channel->hw_value;
@@ -2757,6 +2883,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
rtw89_store_op_chan(rtwdev, false);
+ rtw89_set_channel(rtwdev);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
@@ -2862,6 +2989,7 @@ int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
+ case RTW89_PKT_DROP_SEL_BAND_ONCE:
break;
default:
rtw89_debug(rtwdev, RTW89_DBG_FW,
@@ -2877,6 +3005,14 @@ int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
+ RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
+ params->macid_band_sel[0]);
+ RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
+ params->macid_band_sel[1]);
+ RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
+ params->macid_band_sel[2]);
+ RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
+ params->macid_band_sel[3]);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
@@ -2896,3 +3032,563 @@ fail:
dev_kfree_skb_any(skb);
return ret;
}
+
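+/* Offload a Null-Data template to the firmware and let it transmit
+ * periodic keep-alive frames for this vif while in WoW.
+ */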
+#define H2C_KEEP_ALIVE_LEN 4
+int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct sk_buff *skb;
+ u8 pkt_id = 0;
+ int ret;
+
+ if (enable) {
+ ret = rtw89_fw_h2c_add_wow_fw_ofld(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id);
+ if (ret)
+ return -EPERM;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_KEEP_ALIVE_LEN);
+
+ RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
+ RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
+ RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
+ RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_KEEP_ALIVE, 0, 1,
+ H2C_KEEP_ALIVE_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
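+/* Let the firmware monitor the link while in WoW; the check period and
+ * try-packet count are only programmed when disconnect wakeup is enabled.
+ */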
+#define H2C_DISCONNECT_DETECT_LEN 8
+int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool enable)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct sk_buff *skb;
+ u8 macid = rtwvif->mac_id;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
+
+ if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
+ RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
+ RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
+ RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
+ RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
+ RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_DISCONNECT_DETECT, 0, 1,
+ H2C_DISCONNECT_DETECT_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define H2C_WOW_GLOBAL_LEN 8
+int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct sk_buff *skb;
+ u8 macid = rtwvif->mac_id;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_WOW_GLOBAL_LEN);
+
+ RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
+ RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_WOW_GLOBAL, 0, 1,
+ H2C_WOW_GLOBAL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define H2C_WAKEUP_CTRL_LEN 4
+int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct sk_buff *skb;
+ u8 macid = rtwvif->mac_id;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_WAKEUP_CTRL_LEN);
+
+ if (rtw_wow->pattern_cnt)
+ RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
+ if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
+ RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
+ if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
+ RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
+
+ RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_WAKEUP_CTRL, 0, 1,
+ H2C_WAKEUP_CTRL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
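+/* Program one wake-pattern CAM entry; the pattern mask words and CRC are
+ * only written for a valid entry, so the same command can also be used to
+ * invalidate an index.
+ */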
+#define H2C_WOW_CAM_UPD_LEN 24
+int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_WOW_CAM_UPD_LEN);
+
+ RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
+ RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
+ if (cam_info->valid) {
+ RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
+ RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
+ RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
+ RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
+ RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
+ RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
+ cam_info->negative_pattern_match);
+ RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
+ cam_info->skip_mac_hdr);
+ RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
+ RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
+ RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
+ }
+ RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_WOW_CAM_UPD, 0, 1,
+ H2C_WOW_CAM_UPD_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
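+/* Send an H2C and block until the matching C2H completes the given wait
+ * condition; the skb is consumed whether or not the transmission succeeds.
+ */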
+static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ struct rtw89_wait_info *wait, unsigned int cond)
+{
+ int ret;
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return rtw89_wait_for_cond(wait, cond);
+}
+
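+/* Add one role/channel to an MCC group and wait for the firmware ack. */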
+#define H2C_ADD_MCC_LEN 16
+int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_add_req *p)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for add mcc\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_ADD_MCC_LEN);
+ RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
+ RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
+ RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
+ RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
+ RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
+ RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
+ RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
+ RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
+ RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
+ RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
+ RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
+ RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
+ RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
+ RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
+ RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
+ RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
+ RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
+ RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
+ RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
+ RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_ADD_MCC, 0, 0,
+ H2C_ADD_MCC_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+#define H2C_START_MCC_LEN 12
+int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_start_req *p)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for start mcc\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_START_MCC_LEN);
+ RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
+ RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
+ RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
+ RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
+ RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
+ RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
+ RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
+ RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
+ RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_START_MCC, 0, 0,
+ H2C_START_MCC_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+#define H2C_STOP_MCC_LEN 4
+int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
+ bool prev_groups)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for stop mcc\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_STOP_MCC_LEN);
+ RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
+ RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
+ RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_STOP_MCC, 0, 0,
+ H2C_STOP_MCC_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+#define H2C_DEL_MCC_GROUP_LEN 4
+int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
+ bool prev_groups)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for del mcc group\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
+ RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
+ RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_DEL_MCC_GROUP, 0, 0,
+ H2C_DEL_MCC_GROUP_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+#define H2C_RESET_MCC_GROUP_LEN 4
+int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for reset mcc group\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
+ RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_RESET_MCC_GROUP, 0, 0,
+ H2C_RESET_MCC_GROUP_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
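+/* Request the TSF values of both MCC roles; the report is copied out of
+ * the wait completion buffer filled by the C2H handler.
+ */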
+#define H2C_MCC_REQ_TSF_LEN 4
+int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_tsf_req *req,
+ struct rtw89_mac_mcc_tsf_rpt *rpt)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_mac_mcc_tsf_rpt *tmp;
+ struct sk_buff *skb;
+ unsigned int cond;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for mcc req tsf\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_MCC_REQ_TSF_LEN);
+ RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
+ RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
+ RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_MCC_REQ_TSF, 0, 0,
+ H2C_MCC_REQ_TSF_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret)
+ return ret;
+
+ tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
+ *rpt = *tmp;
+
+ return 0;
+}
+
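+/* The H2C carries a 4-byte descriptor followed by one bit per MAC ID. */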
+#define H2C_MCC_MACID_BITMAP_DSC_LEN 4
+int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
+ u8 *bitmap)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+ u8 map_len;
+ u8 h2c_len;
+
+ BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
+ map_len = RTW89_MAX_MAC_ID_NUM / 8;
+ h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for mcc macid bitmap\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, h2c_len);
+ RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
+ RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
+ RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
+ RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
+ h2c_len);
+
+ cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+#define H2C_MCC_SYNC_LEN 4
+int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
+ u8 target, u8 offset)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for mcc sync\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_MCC_SYNC_LEN);
+ RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
+ RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
+ RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
+ RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_MCC_SYNC, 0, 0,
+ H2C_MCC_SYNC_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+#define H2C_MCC_SET_DURATION_LEN 20
+int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_duration *p)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for mcc set duration\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_MCC_SET_DURATION_LEN);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
+ p->start_tsf_low);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
+ p->start_tsf_high);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
+ RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MCC,
+ H2C_FUNC_MCC_SET_DURATION, 0, 0,
+ H2C_MCC_SET_DURATION_LEN);
+
+ cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 0047d5d0e9b1..46d57414f24e 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -176,6 +176,8 @@ struct rtw89_fw_hdr_section_info {
struct rtw89_fw_bin_info {
u8 section_num;
u32 hdr_len;
+ bool dynamic_hdr_en;
+ u32 dynamic_hdr_len;
struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM];
};
@@ -197,6 +199,7 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
+#define RTW89_CHANNEL_TIME_6G 20
#define RTW89_DFS_CHAN_TIME 105
#define RTW89_OFF_CHAN_TIME 100
#define RTW89_DWELL_TIME 20
@@ -494,6 +497,8 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(23, 16))
#define GET_FW_HDR_SUBINDEX(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(31, 24))
+#define GET_FW_HDR_LEN(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr) + 3), GENMASK(23, 16))
#define GET_FW_HDR_MONTH(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(7, 0))
#define GET_FW_HDR_DATE(fwhdr) \
@@ -506,8 +511,16 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
le32_get_bits(*((const __le32 *)(fwhdr) + 5), GENMASK(31, 0))
#define GET_FW_HDR_SEC_NUM(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 6), GENMASK(15, 8))
+#define GET_FW_HDR_DYN_HDR(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr) + 7), BIT(16))
#define GET_FW_HDR_CMD_VERSERION(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 7), GENMASK(31, 24))
+
+#define GET_FW_DYNHDR_LEN(fwdynhdr) \
+ le32_get_bits(*((const __le32 *)(fwdynhdr)), GENMASK(31, 0))
+#define GET_FW_DYNHDR_COUNT(fwdynhdr) \
+ le32_get_bits(*((const __le32 *)(fwdynhdr) + 1), GENMASK(31, 0))
+
static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
{
le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
@@ -1860,6 +1873,231 @@ static inline void RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(void *cmd, u32 va
le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(15, 8));
}
+static inline void RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 2, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 3, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 4, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 5, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_KEEP_ALIVE_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_KEEP_ALIVE_PERIOD(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(24, 16));
+}
+
+static inline void RTW89_SET_KEEP_ALIVE_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_DISCONNECT_DETECT_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(0));
+}
+
+static inline void RTW89_SET_DISCONNECT_DETECT_TRYOK_BCNFAIL_COUNT_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(1));
+}
+
+static inline void RTW89_SET_DISCONNECT_DETECT_DISCONNECT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(2));
+}
+
+static inline void RTW89_SET_DISCONNECT_DETECT_MAC_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_DISCONNECT_DETECT_TRYOK_BCNFAIL_COUNT_LIMIT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(0));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_DROP_ALL_PKT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(1));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_RX_PARSE_AFTER_WAKE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(2));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_WAKE_BAR_PULLED(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(3));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_MAC_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_PAIRWISE_SEC_ALGO(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_GROUP_SEC_ALGO(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_WOW_GLOBAL_REMOTECTRL_INFO_CONTENT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(0));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(1));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_HW_UNICAST_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(2));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_FW_UNICAST_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(3));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(4));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_REKEYP_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(5));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_EAP_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(6));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_ALL_DATA_ENABLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(7));
+}
+
+static inline void RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_R_W(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(0));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_IDX(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 1));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_WKFM1(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_WKFM2(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 2, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_WKFM3(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 3, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_WKFM4(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 4, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_CRC(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 5, val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 5, val, BIT(22));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 5, val, BIT(23));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_UC(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 5, val, BIT(24));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_MC(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 5, val, BIT(25));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_BC(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 5, val, BIT(26));
+}
+
+static inline void RTW89_SET_WOW_CAM_UPD_VALID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 5, val, BIT(31));
+}
+
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
BTFC_GET = 0x11,
@@ -2529,6 +2767,355 @@ static inline void RTW89_SET_FWCMD_TSF32_TOGL_EARLY(void *cmd, u32 val)
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 16));
}
+enum rtw89_fw_mcc_c2h_rpt_cfg {
+ RTW89_FW_MCC_C2H_RPT_OFF = 0,
+ RTW89_FW_MCC_C2H_RPT_FAIL_ONLY = 1,
+ RTW89_FW_MCC_C2H_RPT_ALL = 2,
+};
+
+struct rtw89_fw_mcc_add_req {
+ u8 macid;
+ u8 central_ch_seg0;
+ u8 central_ch_seg1;
+ u8 primary_ch;
+ enum rtw89_bandwidth bandwidth: 4;
+ u32 group: 2;
+ u32 c2h_rpt: 2;
+ u32 dis_tx_null: 1;
+ u32 dis_sw_retry: 1;
+ u32 in_curr_ch: 1;
+ u32 sw_retry_count: 3;
+ u32 tx_null_early: 4;
+ u32 btc_in_2g: 1;
+ u32 pta_en: 1;
+ u32 rfk_by_pass: 1;
+ u32 ch_band_type: 2;
+ u32 rsvd0: 9;
+ u32 duration;
+ u8 courtesy_en;
+ u8 courtesy_num;
+ u8 courtesy_target;
+ u8 rsvd1;
+};
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(3, 0));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(5, 4));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(7, 6));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, BIT(8));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, BIT(9));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, BIT(10));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(13, 11));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(17, 14));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, BIT(18));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_PTA_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, BIT(19));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, BIT(20));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(22, 21));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_DURATION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 2, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 3, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 3, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 3, val, GENMASK(23, 16));
+}
+
+struct rtw89_fw_mcc_start_req {
+ u32 group: 2;
+ u32 btc_in_group: 1;
+ u32 old_group_action: 2;
+ u32 old_group: 2;
+ u32 rsvd0: 9;
+ u32 notify_cnt: 3;
+ u32 rsvd1: 2;
+ u32 notify_rxdbg_en: 1;
+ u32 rsvd2: 2;
+ u32 macid: 8;
+ u32 tsf_low;
+ u32 tsf_high;
+};
+
+static inline void RTW89_SET_FWCMD_START_MCC_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(4, 3));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_OLD_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(6, 5));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(18, 16));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(21));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_TSF_LOW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_START_MCC_TSF_HIGH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 2, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_STOP_MCC_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_STOP_MCC_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(9, 8));
+}
+
+static inline void RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(10));
+}
+
+static inline void RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(1, 0));
+}
+
+struct rtw89_fw_mcc_tsf_req {
+ u8 group: 2;
+ u8 rsvd0: 6;
+ u8 macid_x;
+ u8 macid_y;
+ u8 rsvd1;
+};
+
+static inline void RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(void *cmd,
+ u8 *bitmap, u8 len)
+{
+ memcpy((__le32 *)cmd + 1, bitmap, len);
+}
+
+static inline void RTW89_SET_FWCMD_MCC_SYNC_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 24));
+}
+
+struct rtw89_fw_mcc_duration {
+ u32 group: 2;
+ u32 btc_in_group: 1;
+ u32 rsvd0: 5;
+ u32 start_macid: 8;
+ u32 macid_x: 8;
+ u32 macid_y: 8;
+ u32 start_tsf_low;
+ u32 start_tsf_high;
+ u32 duration_x;
+ u32 duration_y;
+};
+
+static inline void RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(1, 0));
+}
+
+static
+inline void RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(2));
+}
+
+static
+inline void RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 24));
+}
+
+static
+inline void RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(31, 0));
+}
+
+static
+inline void RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 2, val, GENMASK(31, 0));
+}
+
+static
+inline void RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 3, val, GENMASK(31, 0));
+}
+
+static
+inline void RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 4, val, GENMASK(31, 0));
+}
+
#define RTW89_C2H_HEADER_LEN 8
#define RTW89_GET_C2H_CATEGORY(c2h) \
@@ -2540,6 +3127,20 @@ static inline void RTW89_SET_FWCMD_TSF32_TOGL_EARLY(void *cmd, u32 val)
#define RTW89_GET_C2H_LEN(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(13, 0))
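+
+/* C2H attributes parsed once in the IRQ path and cached in skb->cb so the
+ * deferred handler does not have to re-parse the header.
+ */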
+struct rtw89_fw_c2h_attr {
+ u8 category;
+ u8 class;
+ u8 func;
+ u16 len;
+};
+
+static inline struct rtw89_fw_c2h_attr *RTW89_SKB_C2H_CB(struct sk_buff *skb)
+{
+ static_assert(sizeof(skb->cb) >= sizeof(struct rtw89_fw_c2h_attr));
+
+ return (struct rtw89_fw_c2h_attr *)skb->cb;
+}
+
#define RTW89_GET_C2H_LOG_SRT_PRT(c2h) (char *)((__le32 *)(c2h) + 2)
#define RTW89_GET_C2H_LOG_LEN(len) ((len) - RTW89_C2H_HEADER_LEN)
@@ -2607,6 +3208,55 @@ static inline void RTW89_SET_FWCMD_TSF32_TOGL_EARLY(void *cmd, u32 val)
#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
+#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_H2C_FUNC(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+
+#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_GROUP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
+#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_RETURN(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 2))
+#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_FUNC(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+
+struct rtw89_mac_mcc_tsf_rpt {
+ u32 macid_x;
+ u32 macid_y;
+ u32 tsf_x_low;
+ u32 tsf_x_high;
+ u32 tsf_y_low;
+ u32 tsf_y_high;
+};
+
+static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
+
+#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_X(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_Y(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_GROUP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(17, 16))
+#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_X(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
+#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_X(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
+#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
+#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+
+#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_STATUS(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(5, 0))
+#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_GROUP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 6))
+#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_MACID(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_LOW(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
+#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
+
#define RTW89_FW_HDR_SIZE 32
#define RTW89_FW_SECTION_HDR_SIZE 16
@@ -2676,6 +3326,14 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_FUNC_LOG_CFG 0x0
#define H2C_FUNC_MAC_GENERAL_PKT 0x1
+/* CLASS 1 - WOW */
+#define H2C_CL_MAC_WOW 0x1
+#define H2C_FUNC_KEEP_ALIVE 0x0
+#define H2C_FUNC_DISCONNECT_DETECT 0x1
+#define H2C_FUNC_WOW_GLOBAL 0x2
+#define H2C_FUNC_WAKEUP_CTRL 0x8
+#define H2C_FUNC_WOW_CAM_UPD 0xC
+
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
#define H2C_FUNC_MAC_LPS_PARM 0x0
@@ -2720,6 +3378,25 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_CL_BA_CAM 0xc
#define H2C_FUNC_MAC_BA_CAM 0x0
+/* CLASS 14 - MCC */
+#define H2C_CL_MCC 0xe
+enum rtw89_mcc_h2c_func {
+ H2C_FUNC_ADD_MCC = 0x0,
+ H2C_FUNC_START_MCC = 0x1,
+ H2C_FUNC_STOP_MCC = 0x2,
+ H2C_FUNC_DEL_MCC_GROUP = 0x3,
+ H2C_FUNC_RESET_MCC_GROUP = 0x4,
+ H2C_FUNC_MCC_REQ_TSF = 0x5,
+ H2C_FUNC_MCC_MACID_BITMAP = 0x6,
+ H2C_FUNC_MCC_SYNC = 0x7,
+ H2C_FUNC_MCC_SET_DURATION = 0x8,
+
+ NUM_OF_RTW89_MCC_H2C_FUNC,
+};
+
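+/* Each (group, H2C function) pair maps to a distinct wait condition so MCC
+ * commands for different groups complete independently.
+ */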
+#define RTW89_MCC_WAIT_COND(group, func) \
+ ((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func))
+
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
@@ -2845,6 +3522,38 @@ int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
u8 act, u8 noa_id);
int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool en);
+int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable);
+int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool enable);
+int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable);
+int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool enable);
+int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
+ struct rtw89_wow_cam_info *cam_info);
+int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_add_req *p);
+int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_start_req *p);
+int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
+ bool prev_groups);
+int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
+ bool prev_groups);
+int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group);
+int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_tsf_req *req,
+ struct rtw89_mac_mcc_tsf_rpt *rpt);
+int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
+ u8 *bitmap);
+int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
+ u8 target, u8 offset);
+int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mcc_duration *p);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 0508dfca8edf..d80050c2e9b3 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -7,6 +7,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "pci.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@@ -31,6 +32,8 @@ const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = {
[RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
[RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR,
[RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_TXD_FIFO_0_V1] = TXD_FIFO_0_BASE_ADDR_V1,
+ [RTW89_MAC_MEM_TXD_FIFO_1_V1] = TXD_FIFO_1_BASE_ADDR_V1,
};
static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset,
@@ -272,106 +275,163 @@ static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
- enum mac_ax_err_info err)
+static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
{
- u32 dmac_err, cmac_err;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 dmac_err;
+ int i, ret;
- if (err != MAC_AX_ERR_L1_ERR_DMAC &&
- err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
- err != MAC_AX_ERR_L0_ERR_CMAC0 &&
- err != MAC_AX_ERR_L0_ERR_CMAC1)
+ ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
+ if (ret) {
+ rtw89_warn(rtwdev, "[DMAC] : DMAC not enabled\n");
return;
+ }
- rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
- rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
-
- cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR);
- rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR =0x%08x\n", cmac_err);
dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
- rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR =0x%08x\n", dmac_err);
+ rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
+ rtw89_info(rtwdev, "R_AX_DMAC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));
if (dmac_err) {
- rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG =0x%08x ",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG));
- rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG));
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
+ rtw89_info(rtwdev, "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
+ rtw89_info(rtwdev, "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
+ rtw89_info(rtwdev, "R_AX_PLE_DBGERR_STS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
+ }
}
if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR =0x%08x ",
+ rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR =0x%08x\n",
+ rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
+ if (chip->chip_id == RTL8852C)
+ rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1));
+ else
+ rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
}
if (dmac_err & B_AX_WSEC_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D00 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D04 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D10 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D14 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D18 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D20 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D24 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D28 =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
- rtw89_info(rtwdev, "SEC_local_Register 0x9D2C =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR));
+ rtw89_info(rtwdev, "R_AX_SEC_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG));
+ rtw89_info(rtwdev, "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ rtw89_info(rtwdev, "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ rtw89_info(rtwdev, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ rtw89_info(rtwdev, "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ rtw89_info(rtwdev, "R_AX_SEC_DEBUG1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG1));
+ rtw89_info(rtwdev, "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ rtw89_info(rtwdev, "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+
+ rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
+ B_AX_DBG_SEL0, 0x8B);
+ rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
+ B_AX_DBG_SEL1, 0x8B);
+ rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1,
+ B_AX_SEL_0XC0_MASK, 1);
+ for (i = 0; i < 0x10; i++) {
+ rtw89_write32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
+ B_AX_SEC_DBG_PORT_FIELD_MASK, i);
+ rtw89_info(rtwdev, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
+ i, rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
+ }
+ } else {
+ rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
+ rtw89_info(rtwdev, "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ rtw89_info(rtwdev, "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ rtw89_info(rtwdev, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ rtw89_info(rtwdev, "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ rtw89_info(rtwdev, "R_AX_SEC_CAM_WDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
+ rtw89_info(rtwdev, "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ rtw89_info(rtwdev, "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+ rtw89_info(rtwdev, "R_AX_SEC_TRX_PKT_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
+ rtw89_info(rtwdev, "R_AX_SEC_TRX_BLK_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
+ }
}
if (dmac_err & B_AX_MPDU_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR =0x%08x ",
+ rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR =0x%08x\n",
+ rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
- rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR =0x%08x ",
+ rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR =0x%08x\n",
+ rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
}
if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR =0x%08x ",
+ rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR= 0x%08x\n",
+ rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
}
if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ",
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
- rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ",
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
- dump_err_status_dispatcher(rtwdev);
}
if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
- rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR));
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR));
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR));
+ } else {
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
+ }
}
if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ",
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
- rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ",
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
@@ -391,86 +451,190 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
- rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
- rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
- rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
- dump_err_status_dispatcher(rtwdev);
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_RX_CTRL0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL0));
+ rtw89_info(rtwdev, "R_AX_RX_CTRL1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL1));
+ rtw89_info(rtwdev, "R_AX_RX_CTRL2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL2));
+ } else {
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
+ }
}
if (dmac_err & B_AX_PKTIN_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ",
- rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
- rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ",
+ rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n",
+ rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
}
- if (dmac_err & B_AX_DISPATCH_ERR_FLAG)
- dump_err_status_dispatcher(rtwdev);
+ if (dmac_err & B_AX_DISPATCH_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_BBRPT_ERR_FLAG) {
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ } else {
+ rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ }
+ }
- if (dmac_err & B_AX_DLE_CPUIO_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_CPUIO_ERR_IMR=0x%08x ",
- rtw89_read32(rtwdev, R_AX_CPUIO_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_CPUIO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR));
+ if (dmac_err & B_AX_HAXIDMA_ERR_FLAG && chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
+ rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
}
+}
+
+static void rtw89_mac_dump_cmac_err_status(struct rtw89_dev *rtwdev,
+ u8 band)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 offset = 0;
+ u32 cmac_err;
+ int ret;
- if (dmac_err & BIT(11)) {
- rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
+ ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL);
+ if (ret) {
+ if (band)
+ rtw89_warn(rtwdev, "[CMAC] : CMAC1 not enabled\n");
+ else
+ rtw89_warn(rtwdev, "[CMAC] : CMAC0 not enabled\n");
+ return;
}
+ if (band)
+ offset = RTW89_MAC_AX_BAND_REG_OFFSET;
+
+ cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset);
+ rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset));
+ rtw89_info(rtwdev, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset));
+ rtw89_info(rtwdev, "R_AX_CK_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CK_EN + offset));
+
if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) {
- rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR=0x%08x ",
- rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR=0x%04x\n",
- rtw89_read16(rtwdev, R_AX_SCHEDULE_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset));
+ rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset));
}
if (cmac_err & B_AX_PTCL_TOP_ERR_IND) {
- rtw89_info(rtwdev, "R_AX_PTCL_IMR0=0x%08x ",
- rtw89_read32(rtwdev, R_AX_PTCL_IMR0));
- rtw89_info(rtwdev, "R_AX_PTCL_ISR0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PTCL_ISR0));
+ rtw89_info(rtwdev, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset));
+ rtw89_info(rtwdev, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset));
}
if (cmac_err & B_AX_DMA_TOP_ERR_IND) {
- rtw89_info(rtwdev, "R_AX_DLE_CTRL=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DLE_CTRL));
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset));
+ rtw89_info(rtwdev, "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset));
+ } else {
+ rtw89_info(rtwdev, "R_AX_DLE_CTRL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset));
+ }
}
- if (cmac_err & B_AX_PHYINTF_ERR_IND) {
- rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR));
+ if (cmac_err & B_AX_DMA_TOP_ERR_IND || cmac_err & B_AX_WMAC_RX_ERR_IND) {
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset));
+ rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ } else {
+ rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ }
}
if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
- rtw89_info(rtwdev, "R_AX_TXPWR_IMR=0x%08x ",
- rtw89_read32(rtwdev, R_AX_TXPWR_IMR));
- rtw89_info(rtwdev, "R_AX_TXPWR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPWR_ISR));
- }
-
- if (cmac_err & B_AX_WMAC_RX_ERR_IND) {
- rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x ",
- rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL));
- rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_TXPWR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset));
+ rtw89_info(rtwdev, "R_AX_TXPWR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset));
}
if (cmac_err & B_AX_WMAC_TX_ERR_IND) {
- rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR=0x%08x ",
- rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR));
- rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL));
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA + offset));
+ rtw89_info(rtwdev, "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA_MASK + offset));
+ } else {
+ rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR + offset));
+ }
+ rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset));
}
+ rtw89_info(rtwdev, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
+}
+
+static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err)
+{
+ if (err != MAC_AX_ERR_L1_ERR_DMAC &&
+ err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC0 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC1)
+ return;
+
+ rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
+ rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
+
+ rtw89_mac_dump_dmac_err_status(rtwdev);
+ rtw89_mac_dump_cmac_err_status(rtwdev, RTW89_MAC_0);
+ if (rtwdev->dbcc_en)
+ rtw89_mac_dump_cmac_err_status(rtwdev, RTW89_MAC_1);
+
rtwdev->hci.ops->dump_err_status(rtwdev);
if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1)
@@ -1304,6 +1468,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
/* PCIE 64 */
.ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
+ /* 8852A PCIE WOW */
+ .ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,},
};
EXPORT_SYMBOL(rtw89_mac_size);
@@ -1331,6 +1497,60 @@ static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
return cfg;
}
+static bool mac_is_txq_empty(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mac_dle_dfi_qempty qempty;
+ u32 qnum, qtmp, val32, msk32;
+ int i, j, ret;
+
+ qnum = rtwdev->chip->wde_qempty_acq_num;
+ qempty.dle_type = DLE_CTRL_TYPE_WDE;
+
+ for (i = 0; i < qnum; i++) {
+ qempty.grpsel = i;
+ ret = dle_dfi_qempty(rtwdev, &qempty);
+ if (ret) {
+ rtw89_warn(rtwdev, "dle dfi acq empty %d\n", ret);
+ return false;
+ }
+ qtmp = qempty.qempty;
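+ /* each group reports QEMP_ACQ_GRP_MACID_NUM MACIDs, 4 qempty bits
+ * per MACID; every bit must be set for that MACID's AC queues to
+ * count as empty
+ */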
+ for (j = 0; j < QEMP_ACQ_GRP_MACID_NUM; j++) {
+ val32 = FIELD_GET(QEMP_ACQ_GRP_QSEL_MASK, qtmp);
+ if (val32 != QEMP_ACQ_GRP_QSEL_MASK)
+ return false;
+ qtmp >>= QEMP_ACQ_GRP_QSEL_SH;
+ }
+ }
+
+ qempty.grpsel = rtwdev->chip->wde_qempty_mgq_sel;
+ ret = dle_dfi_qempty(rtwdev, &qempty);
+ if (ret) {
+ rtw89_warn(rtwdev, "dle dfi mgq empty %d\n", ret);
+ return false;
+ }
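+ /* the CMAC0 management queues (normal, no power-save and CPU MGQ)
+ * must be empty as well
+ */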
+ msk32 = B_CMAC0_MGQ_NORMAL | B_CMAC0_MGQ_NO_PWRSAV | B_CMAC0_CPUMGQ;
+ if ((qempty.qempty & msk32) != msk32)
+ return false;
+
+ if (rtwdev->dbcc_en) {
+ msk32 |= B_CMAC1_MGQ_NORMAL | B_CMAC1_MGQ_NO_PWRSAV | B_CMAC1_CPUMGQ;
+ if ((qempty.qempty & msk32) != msk32)
+ return false;
+ }
+
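+ /* finally, all DMAC-side WDE/PLE quota/queue empty bits must be set */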
+ msk32 = B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU |
+ B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU | B_AX_PLE_EMPTY_QTA_DMAC_H2C |
+ B_AX_WDE_EMPTY_QUE_OTHERS | B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX |
+ B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | B_AX_PLE_EMPTY_QTA_DMAC_CPUIO |
+ B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_HIF |
+ B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QTA_DMAC_PKTIN |
+ B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL |
+ B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX;
+ val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0);
+
+ return (val32 & msk32) == msk32;
+}
+
static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
const struct rtw89_dle_size *ple)
{
@@ -1429,10 +1649,8 @@ static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg
#define INVALID_QT_WCPU U16_MAX
#define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \
do { \
- val = ((_min_x) & \
- B_AX_ ## _module ## _MIN_SIZE_MASK) | \
- (((_max_x) << 16) & \
- B_AX_ ## _module ## _MAX_SIZE_MASK); \
+ val = u32_encode_bits(_min_x, B_AX_ ## _module ## _MIN_SIZE_MASK) | \
+ u32_encode_bits(_max_x, B_AX_ ## _module ## _MAX_SIZE_MASK); \
rtw89_write32(rtwdev, \
R_AX_ ## _module ## _QTA ## _idx ## _CFG, \
val); \
@@ -1476,8 +1694,48 @@ static void ple_quota_cfg(struct rtw89_dev *rtwdev,
SET_QUOTA(tx_rpt, PLE, 11);
}
+int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow)
+{
+ const struct rtw89_ple_quota *min_cfg, *max_cfg;
+ const struct rtw89_dle_mem *cfg;
+ u32 val;
+
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ if (rtwdev->mac.qta_mode != RTW89_QTA_SCC) {
+ rtw89_err(rtwdev, "[ERR]support SCC mode only\n");
+ return -EINVAL;
+ }
+
+ if (wow)
+ cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_WOW);
+ else
+ cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_SCC);
+ if (!cfg) {
+ rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n");
+ return -EINVAL;
+ }
+
+ min_cfg = cfg->ple_min_qt;
+ max_cfg = cfg->ple_max_qt;
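+ /* only the CMAC0/CMAC1 RX DMA quotas (PLE quota config 6 and 7) are resized */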
+ SET_QUOTA(cma0_dma, PLE, 6);
+ SET_QUOTA(cma1_dma, PLE, 7);
+
+ return 0;
+}
#undef SET_QUOTA
+void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable)
+{
+ u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC;
+
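+ /* toggle HW decryption of unicast and broadcast/multicast management frames */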
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_SEC_ENG_CTRL, msk32);
+}
+
static void dle_quota_cfg(struct rtw89_dev *rtwdev,
const struct rtw89_dle_mem *cfg,
u16 ext_wde_min_qt_wcpu)
@@ -1827,10 +2085,10 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
- enum rtw89_machdr_frame_type type,
- enum rtw89_mac_fwd_target fwd_target,
- u8 mac_idx)
+int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
+ enum rtw89_machdr_frame_type type,
+ enum rtw89_mac_fwd_target fwd_target,
+ u8 mac_idx)
{
u32 reg;
u32 val;
@@ -3114,7 +3372,7 @@ static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL);
}
-static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
+void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
{
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
@@ -3129,8 +3387,7 @@ static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
}
-static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
- bool dlfw)
+int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw)
{
u32 val;
int ret;
@@ -3269,11 +3526,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
return ret;
}
- rtw89_mac_disable_cpu(rtwdev);
- ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
- if (ret)
- return ret;
-
ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL);
if (ret)
return ret;
@@ -3348,6 +3600,13 @@ int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
u8 grp = macid >> 5;
int ret;
+ /* If this is called by change_interface() in the case of P2P, the
+ * device could be powered off, so ignore this operation.
+ */
+ if (test_bit(RTW89_FLAG_CHANGING_INTERFACE, rtwdev->flags) &&
+ !test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
+ return 0;
+
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
if (ret)
return ret;
@@ -3928,6 +4187,164 @@ rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
{
}
+static void
+rtw89_mac_c2h_mcc_rcv_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ u8 group = RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h->data);
+ u8 func = RTW89_GET_MAC_C2H_MCC_RCV_ACK_H2C_FUNC(c2h->data);
+
+ switch (func) {
+ case H2C_FUNC_ADD_MCC:
+ case H2C_FUNC_START_MCC:
+ case H2C_FUNC_STOP_MCC:
+ case H2C_FUNC_DEL_MCC_GROUP:
+ case H2C_FUNC_RESET_MCC_GROUP:
+ case H2C_FUNC_MCC_REQ_TSF:
+ case H2C_FUNC_MCC_MACID_BITMAP:
+ case H2C_FUNC_MCC_SYNC:
+ case H2C_FUNC_MCC_SET_DURATION:
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "invalid MCC C2H RCV ACK: func %d\n", func);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "MCC C2H RCV ACK: group %d, func %d\n", group, func);
+}
+
+static void
+rtw89_mac_c2h_mcc_req_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ u8 group = RTW89_GET_MAC_C2H_MCC_REQ_ACK_GROUP(c2h->data);
+ u8 func = RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_FUNC(c2h->data);
+ u8 retcode = RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_RETURN(c2h->data);
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ bool next = false;
+
+ switch (func) {
+ case H2C_FUNC_MCC_REQ_TSF:
+ next = true;
+ break;
+ case H2C_FUNC_MCC_MACID_BITMAP:
+ case H2C_FUNC_MCC_SYNC:
+ case H2C_FUNC_MCC_SET_DURATION:
+ break;
+ case H2C_FUNC_ADD_MCC:
+ case H2C_FUNC_START_MCC:
+ case H2C_FUNC_STOP_MCC:
+ case H2C_FUNC_DEL_MCC_GROUP:
+ case H2C_FUNC_RESET_MCC_GROUP:
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "invalid MCC C2H REQ ACK: func %d\n", func);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "MCC C2H REQ ACK: group %d, func %d, return code %d\n",
+ group, func, retcode);
+
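+ /* a successful H2C_FUNC_MCC_REQ_TSF is completed later by the TSF
+ * report handler, so don't complete the wait here
+ */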
+ if (!retcode && next)
+ return;
+
+ data.err = !!retcode;
+ cond = RTW89_MCC_WAIT_COND(group, func);
+ rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
+}
+
+static void
+rtw89_mac_c2h_mcc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ u8 group = RTW89_GET_MAC_C2H_MCC_TSF_RPT_GROUP(c2h->data);
+ struct rtw89_completion_data data = {};
+ struct rtw89_mac_mcc_tsf_rpt *rpt;
+ unsigned int cond;
+
+ rpt = (struct rtw89_mac_mcc_tsf_rpt *)data.buf;
+ rpt->macid_x = RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_X(c2h->data);
+ rpt->macid_y = RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_Y(c2h->data);
+ rpt->tsf_x_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_X(c2h->data);
+ rpt->tsf_x_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_X(c2h->data);
+ rpt->tsf_y_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h->data);
+ rpt->tsf_y_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h->data);
+
+ cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_REQ_TSF);
+ rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
+}
+
+static void
+rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ u8 group = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_GROUP(c2h->data);
+ u8 macid = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_MACID(c2h->data);
+ u8 status = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_STATUS(c2h->data);
+ u32 tsf_low = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_LOW(c2h->data);
+ u32 tsf_high = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h->data);
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ bool rsp = true;
+ bool err;
+ u8 func;
+
+ switch (status) {
+ case RTW89_MAC_MCC_ADD_ROLE_OK:
+ case RTW89_MAC_MCC_ADD_ROLE_FAIL:
+ func = H2C_FUNC_ADD_MCC;
+ err = status == RTW89_MAC_MCC_ADD_ROLE_FAIL;
+ break;
+ case RTW89_MAC_MCC_START_GROUP_OK:
+ case RTW89_MAC_MCC_START_GROUP_FAIL:
+ func = H2C_FUNC_START_MCC;
+ err = status == RTW89_MAC_MCC_START_GROUP_FAIL;
+ break;
+ case RTW89_MAC_MCC_STOP_GROUP_OK:
+ case RTW89_MAC_MCC_STOP_GROUP_FAIL:
+ func = H2C_FUNC_STOP_MCC;
+ err = status == RTW89_MAC_MCC_STOP_GROUP_FAIL;
+ break;
+ case RTW89_MAC_MCC_DEL_GROUP_OK:
+ case RTW89_MAC_MCC_DEL_GROUP_FAIL:
+ func = H2C_FUNC_DEL_MCC_GROUP;
+ err = status == RTW89_MAC_MCC_DEL_GROUP_FAIL;
+ break;
+ case RTW89_MAC_MCC_RESET_GROUP_OK:
+ case RTW89_MAC_MCC_RESET_GROUP_FAIL:
+ func = H2C_FUNC_RESET_MCC_GROUP;
+ err = status == RTW89_MAC_MCC_RESET_GROUP_FAIL;
+ break;
+ case RTW89_MAC_MCC_SWITCH_CH_OK:
+ case RTW89_MAC_MCC_SWITCH_CH_FAIL:
+ case RTW89_MAC_MCC_TXNULL0_OK:
+ case RTW89_MAC_MCC_TXNULL0_FAIL:
+ case RTW89_MAC_MCC_TXNULL1_OK:
+ case RTW89_MAC_MCC_TXNULL1_FAIL:
+ case RTW89_MAC_MCC_SWITCH_EARLY:
+ case RTW89_MAC_MCC_TBTT:
+ case RTW89_MAC_MCC_DURATION_START:
+ case RTW89_MAC_MCC_DURATION_END:
+ rsp = false;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "invalid MCC C2H STS RPT: status %d\n", status);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "MCC C2H STS RPT: group %d, macid %d, status %d, tsf {%d, %d}\n",
+ group, macid, status, tsf_low, tsf_high);
+
+ if (!rsp)
+ return;
+
+ data.err = err;
+ cond = RTW89_MCC_WAIT_COND(group, func);
+ rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3949,6 +4366,25 @@ void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
};
+static
+void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_MCC_RCV_ACK] = rtw89_mac_c2h_mcc_rcv_ack,
+ [RTW89_MAC_C2H_FUNC_MCC_REQ_ACK] = rtw89_mac_c2h_mcc_req_ack,
+ [RTW89_MAC_C2H_FUNC_MCC_TSF_RPT] = rtw89_mac_c2h_mcc_tsf_rpt,
+ [RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT] = rtw89_mac_c2h_mcc_status_rpt,
+};
+
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
+{
+ switch (class) {
+ default:
+ return false;
+ case RTW89_MAC_C2H_CLASS_MCC:
+ return true;
+ }
+}
+
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func)
{
@@ -3964,6 +4400,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX)
handler = rtw89_mac_c2h_ofld_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_MCC:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC)
+ handler = rtw89_mac_c2h_mcc_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
@@ -4819,6 +5259,7 @@ int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_read_xtal_si);
static
void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
@@ -4864,3 +5305,24 @@ void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_pkt_drop_vif_iter,
rtwvif);
}
+
+int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx band)
+{
+ struct rtw89_pkt_drop_params params = {0};
+ bool empty;
+ int i, ret = 0, try_cnt = 3;
+
+ params.mac_band = band;
+ params.sel = RTW89_PKT_DROP_SEL_BAND_ONCE;
+
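+ /* poll until the TX queues drain; on timeout, ask firmware to drop
+ * the band's pending packets and poll again
+ */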
+ for (i = 0; i < try_cnt; i++) {
+ ret = read_poll_timeout(mac_is_txq_empty, empty, empty, 50,
+ 50000, false, rtwdev);
+ if (ret)
+ rtw89_fw_h2c_pkt_drop(rtwdev, &params);
+ else
+ return 0;
+ }
+ return ret;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 6f4ada1869a1..adb0c86a98d3 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -211,6 +211,51 @@ enum rtw89_mac_dbg_port_sel {
RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL,
RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY,
RTW89_DBG_PORT_SEL_PKTINFO,
+ /* DISPATCHER related */
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX0,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX1,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX2,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX3,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX4,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX5,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX6,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX7,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX8,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TX9,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TXA,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TXB,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TXC,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TXD,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TXE,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_TXF,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX0,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX1,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX3,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX4,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX5,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX6,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX7,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX8,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TX9,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TXA,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TXB,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_TXC,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_RX0,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_RX1,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_RX2,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_RX3,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_RX4,
+ RTW89_DBG_PORT_SEL_DSPT_HDT_RX5,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_0,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_1,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_2,
+ RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P1,
+ RTW89_DBG_PORT_SEL_DSPT_STF_CTRL,
+ RTW89_DBG_PORT_SEL_DSPT_ADDR_CTRL,
+ RTW89_DBG_PORT_SEL_DSPT_WDE_INTF,
+ RTW89_DBG_PORT_SEL_DSPT_PLE_INTF,
+ RTW89_DBG_PORT_SEL_DSPT_FLOW_CTRL,
/* PCIE related */
RTW89_DBG_PORT_SEL_PCIE_TXDMA,
RTW89_DBG_PORT_SEL_PCIE_RXDMA,
@@ -245,6 +290,8 @@ enum rtw89_mac_dbg_port_sel {
#define BCN_IE_CAM1_BASE_ADDR 0x188A0000
#define TXD_FIFO_0_BASE_ADDR 0x18856200
#define TXD_FIFO_1_BASE_ADDR 0x188A1080
+#define TXD_FIFO_0_BASE_ADDR_V1 0x18856400 /* for 8852C */
+#define TXD_FIFO_1_BASE_ADDR_V1 0x188A1080 /* for 8852C */
#define TXDATA_FIFO_0_BASE_ADDR 0x18856000
#define TXDATA_FIFO_1_BASE_ADDR 0x188A1000
#define CPU_LOCAL_BASE_ADDR 0x18003000
@@ -271,6 +318,8 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_TXDATA_FIFO_1,
RTW89_MAC_MEM_CPU_LOCAL,
RTW89_MAC_MEM_BSSID_CAM,
+ RTW89_MAC_MEM_TXD_FIFO_0_V1,
+ RTW89_MAC_MEM_TXD_FIFO_1_V1,
/* keep last */
RTW89_MAC_MEM_NUM,
@@ -319,6 +368,15 @@ enum rtw89_mac_c2h_info_func {
RTW89_MAC_C2H_FUNC_INFO_MAX,
};
+enum rtw89_mac_c2h_mcc_func {
+ RTW89_MAC_C2H_FUNC_MCC_RCV_ACK = 0,
+ RTW89_MAC_C2H_FUNC_MCC_REQ_ACK = 1,
+ RTW89_MAC_C2H_FUNC_MCC_TSF_RPT = 2,
+ RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT = 3,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_MCC,
+};
+
enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_INFO,
RTW89_MAC_C2H_CLASS_OFLD,
@@ -329,6 +387,31 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_MAX,
};
+enum rtw89_mac_mcc_status {
+ RTW89_MAC_MCC_ADD_ROLE_OK = 0,
+ RTW89_MAC_MCC_START_GROUP_OK = 1,
+ RTW89_MAC_MCC_STOP_GROUP_OK = 2,
+ RTW89_MAC_MCC_DEL_GROUP_OK = 3,
+ RTW89_MAC_MCC_RESET_GROUP_OK = 4,
+ RTW89_MAC_MCC_SWITCH_CH_OK = 5,
+ RTW89_MAC_MCC_TXNULL0_OK = 6,
+ RTW89_MAC_MCC_TXNULL1_OK = 7,
+
+ RTW89_MAC_MCC_SWITCH_EARLY = 10,
+ RTW89_MAC_MCC_TBTT = 11,
+ RTW89_MAC_MCC_DURATION_START = 12,
+ RTW89_MAC_MCC_DURATION_END = 13,
+
+ RTW89_MAC_MCC_ADD_ROLE_FAIL = 20,
+ RTW89_MAC_MCC_START_GROUP_FAIL = 21,
+ RTW89_MAC_MCC_STOP_GROUP_FAIL = 22,
+ RTW89_MAC_MCC_DEL_GROUP_FAIL = 23,
+ RTW89_MAC_MCC_RESET_GROUP_FAIL = 24,
+ RTW89_MAC_MCC_SWITCH_CH_FAIL = 25,
+ RTW89_MAC_MCC_TXNULL0_FAIL = 26,
+ RTW89_MAC_MCC_TXNULL1_FAIL = 27,
+};
+
struct rtw89_mac_ax_coex {
#define RTW89_MAC_AX_COEX_RTK_MODE 0
#define RTW89_MAC_AX_COEX_CSR_MODE 1
@@ -391,6 +474,7 @@ enum rtw89_mac_bf_rrsc_rate {
#define ACCESS_CMAC(_addr) \
({typeof(_addr) __addr = (_addr); \
__addr >= R_AX_CMAC_REG_START && __addr <= R_AX_CMAC_REG_END; })
+#define RTW89_MAC_AX_BAND_REG_OFFSET 0x2000
#define PTCL_IDLE_POLL_CNT 10000
#define SW_CVR_DUR_US 8
@@ -416,6 +500,17 @@ enum rtw89_mac_bf_rrsc_rate {
#define S_AX_PLE_PAGE_SEL_128 1
#define S_AX_PLE_PAGE_SEL_256 2
+#define B_CMAC0_MGQ_NORMAL BIT(2)
+#define B_CMAC0_MGQ_NO_PWRSAV BIT(3)
+#define B_CMAC0_CPUMGQ BIT(4)
+#define B_CMAC1_MGQ_NORMAL BIT(10)
+#define B_CMAC1_MGQ_NO_PWRSAV BIT(11)
+#define B_CMAC1_CPUMGQ BIT(12)
+
+#define QEMP_ACQ_GRP_MACID_NUM 8
+#define QEMP_ACQ_GRP_QSEL_SH 4
+#define QEMP_ACQ_GRP_QSEL_MASK 0xF
+
#define SDIO_LOCAL_BASE_ADDR 0x80000000
#define PWR_CMD_WRITE 0
@@ -715,6 +810,7 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt46;
const struct rtw89_ple_quota ple_qt47;
const struct rtw89_ple_quota ple_qt58;
+ const struct rtw89_ple_quota ple_qt_52a_wow;
};
extern const struct rtw89_mac_size_set rtw89_mac_size;
@@ -811,6 +907,8 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev);
+int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw);
int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
@@ -830,6 +928,7 @@ static inline int rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
@@ -962,6 +1061,16 @@ static inline void rtw89_mac_ctrl_hci_dma_trx(struct rtw89_dev *rtwdev,
B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
}
+static inline bool rtw89_mac_get_power_state(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+
+ val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE,
+ B_AX_WLMAC_PWR_STE_MASK);
+
+ return !!val;
+}
+
int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool resume, u32 tx_time);
int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
@@ -1010,6 +1119,7 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_PON_EI BIT(1)
#define XTAL_SI_PON_WEI BIT(0)
XTAL_SI_SRAM_CTRL = 0xA1,
+#define XTAL_SI_SRAM_DIS BIT(1)
#define FULL_BIT_MASK GENMASK(7, 0)
};
@@ -1019,5 +1129,12 @@ void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd);
int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
+int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
+ enum rtw89_machdr_frame_type type,
+ enum rtw89_mac_fwd_target fwd_target, u8 mac_idx);
+int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow);
+int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx band);
+void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool wow);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index a296bfa8188f..1a99267d710d 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -14,6 +14,7 @@
#include "sar.h"
#include "ser.h"
#include "util.h"
+#include "wow.h"
static void rtw89_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -130,6 +131,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->bcn_hit_cond = 0;
rtwvif->mac_idx = RTW89_MAC_0;
rtwvif->phy_idx = RTW89_PHY_0;
+ rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
rtwvif->hit_rule = 0;
ether_addr_copy(rtwvif->mac_addr, vif->addr);
@@ -173,6 +175,9 @@ static int rtw89_ops_change_interface(struct ieee80211_hw *hw,
enum nl80211_iftype type, bool p2p)
{
struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ set_bit(RTW89_FLAG_CHANGING_INTERFACE, rtwdev->flags);
rtw89_debug(rtwdev, RTW89_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
vif->addr, vif->type, type, vif->p2p, p2p);
@@ -182,7 +187,13 @@ static int rtw89_ops_change_interface(struct ieee80211_hw *hw,
vif->type = type;
vif->p2p = p2p;
- return rtw89_ops_add_interface(hw, vif);
+ ret = rtw89_ops_add_interface(hw, vif);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to change interface %d\n", ret);
+
+ clear_bit(RTW89_FLAG_CHANGING_INTERFACE, rtwdev->flags);
+
+ return ret;
}
static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
@@ -916,6 +927,55 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_PM
+static int rtw89_ops_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ set_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
+ cancel_delayed_work_sync(&rtwdev->track_work);
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_wow_suspend(rtwdev, wowlan);
+ mutex_unlock(&rtwdev->mutex);
+
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to suspend for wow %d\n", ret);
+ clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int rtw89_ops_resume(struct ieee80211_hw *hw)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_wow_resume(rtwdev);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to resume for wow %d\n", ret);
+ mutex_unlock(&rtwdev->mutex);
+
+ clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
+
+ return ret ? 1 : 0;
+}
+
+static void rtw89_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ device_set_wakeup_enable(rtwdev->dev, enabled);
+}
+#endif
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -953,5 +1013,10 @@ const struct ieee80211_ops rtw89_ops = {
.set_sar_specs = rtw89_ops_set_sar_specs,
.sta_rc_update = rtw89_ops_sta_rc_update,
.set_tid_config = rtw89_ops_set_tid_config,
+#ifdef CONFIG_PM
+ .suspend = rtw89_ops_suspend,
+ .resume = rtw89_ops_resume,
+ .set_wakeup = rtw89_ops_set_wakeup,
+#endif
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 5f8e19639362..1c4500ba777c 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -186,6 +186,17 @@ static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
}
}
+static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
+
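+ /* channel 12 is the FW command channel; clearing its stop bit enables TX DMA */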
+ if (enable)
+ rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
+ else
+ rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
+}
+
static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
struct sk_buff *new,
@@ -256,7 +267,7 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
- new = dev_alloc_skb(desc_info->pkt_size);
+ new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
if (!new)
goto err_sync_device;
@@ -960,8 +971,10 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 bd_cnt, wd_cnt, min_cnt = 0;
struct rtw89_pci_rx_ring *rx_ring;
+ enum rtw89_debug_mask debug_mask;
u32 cnt;
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
@@ -985,10 +998,20 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
wd_cnt = wd_ring->curr_num;
min_cnt = min(bd_cnt, wd_cnt);
- if (min_cnt == 0)
- rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP,
+ if (min_cnt == 0) {
+ /* This message can appear frequently in low power mode or under
+ * high traffic with 8852B; it is recognized as normal behavior,
+ * so print it with the RTW89_DBG_TXRX mask in these situations.
+ */
+ if (rtwpci->low_power || chip->chip_id == RTL8852B)
+ debug_mask = RTW89_DBG_TXRX;
+ else
+ debug_mask = RTW89_DBG_UNEXP;
+
+ rtw89_debug(rtwdev, debug_mask,
"still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
wd_cnt, bd_cnt);
+ }
out_unlock:
spin_unlock_bh(&rtwpci->trx_lock);
@@ -2513,7 +2536,7 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
/* disable all channels except to FW CMD channel to download firmware */
rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
- rtw89_write32_clr(rtwdev, info->dma_stop1.addr, B_AX_STOP_CH12);
+ rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -3771,6 +3794,16 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.recovery_start = rtw89_pci_ops_recovery_start,
.recovery_complete = rtw89_pci_ops_recovery_complete,
+
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
+ .ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
+ .poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie,
+ .clr_idx_all = rtw89_pci_clr_idx_all,
+ .clear = rtw89_pci_clear_resource,
+ .disable_intr = rtw89_pci_disable_intr_lock,
+ .enable_intr = rtw89_pci_enable_intr_lock,
+ .rst_bdram = rtw89_pci_rst_bdram_pcie,
};
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 179740607778..7d033501d4d9 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -202,6 +202,18 @@
#define B_AX_RXP1DMA_INT BIT(1)
#define B_AX_RXDMA_INT BIT(0)
+#define R_AX_HAXI_IDCT_MSK 0x10B8
+#define B_AX_TXBD_LEN0_ERR_IDCT_MSK BIT(3)
+#define B_AX_TXBD_4KBOUND_ERR_IDCT_MSK BIT(2)
+#define B_AX_RXMDA_STUCK_IDCT_MSK BIT(1)
+#define B_AX_TXMDA_STUCK_IDCT_MSK BIT(0)
+
+#define R_AX_HAXI_IDCT 0x10BC
+#define B_AX_TXBD_LEN0_ERR_IDCT BIT(3)
+#define B_AX_TXBD_4KBOUND_ERR_IDCT BIT(2)
+#define B_AX_RXMDA_STUCK_IDCT BIT(1)
+#define B_AX_TXMDA_STUCK_IDCT BIT(0)
+
#define R_AX_HAXI_HIMR10 0x11E0
#define B_AX_TXDMA_CH11_INT_EN_V1 BIT(1)
#define B_AX_TXDMA_CH10_INT_EN_V1 BIT(0)
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 6a6bdc652e09..017710c580c7 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
@@ -9,7 +10,7 @@
#include "ps.h"
#include "reg.h"
#include "sar.h"
-#include "coex.h"
+#include "util.h"
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
@@ -801,6 +802,11 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
+}
+
static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
@@ -1036,6 +1042,7 @@ static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
{
const struct rtw89_chip_info *chip = rtwdev->chip;
union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
return;
@@ -1061,6 +1068,11 @@ static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
case 3:
rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
break;
+ case 4:
+ /* This cfg_type is only used by rfe_type >= 50 with eFEM */
+ if (efuse->rfe_type < 50)
+ break;
+ fallthrough;
default:
rtw89_warn(rtwdev,
"bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
@@ -1117,6 +1129,24 @@ out:
return ret;
}
+static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ u32 addr = reg->addr;
+
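+ /* skip the table's control entries (delay marks 0xf9..0xfe and, for
+ * v1 RF, pseudo addresses below 0x100); only real RF register writes
+ * are stored for the firmware in no-IO mode
+ */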
+ if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
+ addr == 0xfa || addr == 0xf9)
+ return;
+
+ if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
+ return;
+
+ rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
+ (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
+}
+
static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
enum rtw89_rf_path rf_path,
@@ -1329,7 +1359,7 @@ static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
return rtw89_phy_read32(rtwdev, 0x8080);
}
-void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
+void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
enum rtw89_rf_path rf_path, void *data);
@@ -1345,7 +1375,11 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
rf_table = chip->rf_table[path];
rf_reg_info->rf_path = rf_table->rf_path;
- config = rf_table->config ? rf_table->config : rtw89_phy_config_rf_reg;
+ if (noio)
+ config = rtw89_phy_config_rf_reg_noio;
+ else
+ config = rf_table->config ? rf_table->config :
+ rtw89_phy_config_rf_reg;
rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
@@ -1362,13 +1396,15 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
int ret;
/* IQK/DPK clock & reset */
- rtw89_phy_write32_set(rtwdev, 0x0c60, 0x3);
- rtw89_phy_write32_set(rtwdev, 0x0c6c, 0x1);
- rtw89_phy_write32_set(rtwdev, 0x58ac, 0x8000000);
- rtw89_phy_write32_set(rtwdev, 0x78ac, 0x8000000);
+ rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
+ rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
+ rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
+ rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
+ if (chip->chip_id == RTL8852B)
+ rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);
/* check 0x8080 */
- rtw89_phy_write32(rtwdev, 0x8000, 0x8);
+ rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);
ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
1000, false, rtwdev);
@@ -1419,6 +1455,15 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);
+u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
+ addr += rtw89_phy0_phy1_offset(rtwdev, addr);
+ return rtw89_phy_read32_mask(rtwdev, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read32_idx);
+
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val)
{
@@ -1443,23 +1488,21 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
-const u8 rtw89_rs_idx_max[] = {
+static const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_MAX,
[RTW89_RS_OFDM] = RTW89_RATE_OFDM_MAX,
[RTW89_RS_MCS] = RTW89_RATE_MCS_MAX,
[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_MAX,
[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_MAX,
};
-EXPORT_SYMBOL(rtw89_rs_idx_max);
-const u8 rtw89_rs_nss_max[] = {
+static const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_CCK] = 1,
[RTW89_RS_OFDM] = 1,
[RTW89_RS_MCS] = RTW89_NSS_MAX,
[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_MAX,
[RTW89_RS_OFFSET] = 1,
};
-EXPORT_SYMBOL(rtw89_rs_nss_max);
static const u8 _byr_of_rs[] = {
[RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
@@ -1501,6 +1544,7 @@ EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
(txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \
})
+static
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
const struct rtw89_rate_desc *rate_desc)
{
@@ -1523,7 +1567,6 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
}
-EXPORT_SYMBOL(rtw89_phy_read_txpwr_byrate);
static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
@@ -1783,6 +1826,7 @@ static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}
+static
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
struct rtw89_txpwr_limit *lmt,
@@ -1813,7 +1857,6 @@ void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
break;
}
}
-EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit);
static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch)
@@ -1962,6 +2005,7 @@ rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
}
}
+static
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
struct rtw89_txpwr_limit_ru *lmt_ru,
@@ -1992,7 +2036,161 @@ void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
break;
}
}
-EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit_ru);
+
+void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u8 rs[] = {
+ RTW89_RS_CCK,
+ RTW89_RS_OFDM,
+ RTW89_RS_MCS,
+ RTW89_RS_HEDCM,
+ };
+ struct rtw89_rate_desc cur;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 addr, val;
+ s8 v[4] = {};
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr byrate with ch=%d\n", ch);
+
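+ /* byrate entries are packed four s8 values per 32-bit register, so
+ * each rate section size must be a multiple of 4
+ */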
+ BUILD_BUG_ON(rtw89_rs_idx_max[RTW89_RS_CCK] % 4);
+ BUILD_BUG_ON(rtw89_rs_idx_max[RTW89_RS_OFDM] % 4);
+ BUILD_BUG_ON(rtw89_rs_idx_max[RTW89_RS_MCS] % 4);
+ BUILD_BUG_ON(rtw89_rs_idx_max[RTW89_RS_HEDCM] % 4);
+
+ addr = R_AX_PWR_BY_RATE;
+ for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
+ for (i = 0; i < ARRAY_SIZE(rs); i++) {
+ if (cur.nss >= rtw89_rs_nss_max[rs[i]])
+ continue;
+
+ cur.rs = rs[i];
+ for (cur.idx = 0; cur.idx < rtw89_rs_idx_max[rs[i]];
+ cur.idx++) {
+ v[cur.idx % 4] =
+ rtw89_phy_read_txpwr_byrate(rtwdev,
+ band,
+ &cur);
+
+ if ((cur.idx + 1) % 4)
+ continue;
+
+ val = FIELD_PREP(GENMASK(7, 0), v[0]) |
+ FIELD_PREP(GENMASK(15, 8), v[1]) |
+ FIELD_PREP(GENMASK(23, 16), v[2]) |
+ FIELD_PREP(GENMASK(31, 24), v[3]);
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
+ val);
+ addr += 4;
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_set_txpwr_byrate);
+
+void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rate_desc desc = {
+ .nss = RTW89_NSS_1,
+ .rs = RTW89_RS_OFFSET,
+ };
+ u8 band = chan->band_type;
+ s8 v[RTW89_RATE_OFFSET_MAX] = {};
+ u32 val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
+
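+ /* the five rate-offset values are packed as 4-bit fields into one register */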
+ for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++)
+ v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
+
+ BUILD_BUG_ON(RTW89_RATE_OFFSET_MAX != 5);
+ val = FIELD_PREP(GENMASK(3, 0), v[0]) |
+ FIELD_PREP(GENMASK(7, 4), v[1]) |
+ FIELD_PREP(GENMASK(11, 8), v[2]) |
+ FIELD_PREP(GENMASK(15, 12), v[3]) |
+ FIELD_PREP(GENMASK(19, 16), v[4]);
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
+ GENMASK(19, 0), val);
+}
+EXPORT_SYMBOL(rtw89_phy_set_txpwr_offset);
+
+void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_txpwr_limit lmt;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
+ const s8 *ptr;
+ u32 addr, val;
+ u8 i, j;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
+
+ BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit) !=
+ RTW89_TXPWR_LMT_PAGE_SIZE);
+
+ addr = R_AX_PWR_LMT;
+ for (i = 0; i < RTW89_NTX_NUM; i++) {
+ rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i);
+
+ ptr = (s8 *)&lmt;
+ for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE;
+ j += 4, addr += 4, ptr += 4) {
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit);
+
+void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_txpwr_limit_ru lmt_ru;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
+ const s8 *ptr;
+ u32 addr, val;
+ u8 i, j;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
+
+ BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru) !=
+ RTW89_TXPWR_LMT_RU_PAGE_SIZE);
+
+ addr = R_AX_PWR_RU_LMT;
+ for (i = 0; i < RTW89_NTX_NUM; i++) {
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i);
+
+ ptr = (s8 *)&lmt_ru;
+ for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE;
+ j += 4, addr += 4, ptr += 4) {
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit_ru);
struct rtw89_phy_iter_ra_data {
struct rtw89_dev *rtwdev;
@@ -2106,6 +2304,10 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
handler = rtw89_phy_c2h_ra_handler[func];
break;
+ case RTW89_PHY_C2H_CLASS_DM:
+ if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
+ return;
+ fallthrough;
default:
rtw89_info(rtwdev, "c2h class %d not support\n", class);
return;
@@ -2593,6 +2795,129 @@ void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
cfo->packet_count++;
}
+void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
+
+ if (!chip->support_ul_tb_ctrl)
+ return;
+
+ rtwvif->def_tri_idx =
+ rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);
+
+ if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
+ rtwvif->dyn_tb_bedge_en = false;
+ else if (chan->band_type >= RTW89_BAND_5G &&
+ chan->band_width >= RTW89_CHANNEL_WIDTH_40)
+ rtwvif->dyn_tb_bedge_en = true;
+ else
+ rtwvif->dyn_tb_bedge_en = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
+ ul_tb_info->def_if_bandedge, rtwvif->def_tri_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n",
+ rtwvif->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
+}
+
+struct rtw89_phy_ul_tb_check_data {
+ bool valid;
+ bool high_tf_client;
+ bool low_tf_client;
+ bool dyn_tb_bedge_en;
+ u8 def_tri_idx;
+};
+
+static
+void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_phy_ul_tb_check_data *ul_tb_data)
+{
+ struct rtw89_traffic_stats *stats = &rtwdev->stats;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ return;
+
+ if (!vif->cfg.assoc)
+ return;
+
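+ /* classify the client by periodic trigger-frame count with L2H/H2L hysteresis */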
+ if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
+ ul_tb_data->high_tf_client = true;
+ else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
+ ul_tb_data->low_tf_client = true;
+
+ ul_tb_data->valid = true;
+ ul_tb_data->def_tri_idx = rtwvif->def_tri_idx;
+ ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en;
+}
+
+void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
+ struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
+ struct rtw89_vif *rtwvif;
+
+ if (!chip->support_ul_tb_ctrl)
+ return;
+
+ if (rtwdev->total_sta_assoc != 1)
+ return;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data);
+
+ if (!ul_tb_data.valid)
+ return;
+
+ if (ul_tb_data.dyn_tb_bedge_en) {
+ if (ul_tb_data.high_tf_client) {
+ rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
+ rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ "[ULTB] Turn off if_bandedge\n");
+ } else if (ul_tb_data.low_tf_client) {
+ rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
+ ul_tb_info->def_if_bandedge);
+ rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ "[ULTB] Set to default if_bandedge = %d\n",
+ ul_tb_info->def_if_bandedge);
+ }
+ }
+
+ if (ul_tb_info->dyn_tb_tri_en) {
+ if (ul_tb_data.high_tf_client) {
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
+ B_TXSHAPE_TRIANGULAR_CFG, 0);
+ rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ "[ULTB] Turn off Tx triangle\n");
+ } else if (ul_tb_data.low_tf_client) {
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
+ B_TXSHAPE_TRIANGULAR_CFG,
+ ul_tb_data.def_tri_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ "[ULTB] Set to default tx_shap_idx = %d\n",
+ ul_tb_data.def_tri_idx);
+ }
+ }
+}
+
+static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
+
+ if (!chip->support_ul_tb_ctrl)
+ return;
+
+ ul_tb_info->dyn_tb_tri_en = true;
+ ul_tb_info->def_if_bandedge =
+ rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
+}
+
static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
struct rtw89_phy_stat *phystat = &rtwdev->phystat;
@@ -3139,7 +3464,7 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
- if (*ie_page > RTW89_PHYSTS_BITMAP_NUM ||
+ if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
*ie_page == RTW89_RSVD_9)
return false;
else if (*ie_page > RTW89_RSVD_9)
@@ -3779,6 +4104,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
+ rtw89_phy_ul_tb_info_init(rtwdev);
rtw89_phy_init_rf_nctl(rtwdev);
rtw89_chip_rfk_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index ee3bc5e111e1..21233f094644 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -64,6 +64,9 @@
#define MAX_CFO_TOLERANCE 30
#define CFO_TF_CNT_TH 300
+#define UL_TB_TF_CNT_L2H_TH 100
+#define UL_TB_TF_CNT_H2L_TH 70
+
#define CCX_MAX_PERIOD 2097
#define CCX_MAX_PERIOD_UNIT 32
#define MS_TO_4US_RATIO 250
@@ -114,6 +117,15 @@ enum rtw89_phy_c2h_ra_func {
RTW89_PHY_C2H_FUNC_RA_MAX,
};
+enum rtw89_phy_c2h_dm_func {
+ RTW89_PHY_C2H_DM_FUNC_FW_TEST,
+ RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT,
+ RTW89_PHY_C2H_DM_FUNC_SIGB,
+ RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY,
+ RTW89_PHY_C2H_DM_FUNC_MCC_DIG,
+ RTW89_PHY_C2H_DM_FUNC_NUM,
+};
+
enum rtw89_phy_c2h_class {
RTW89_PHY_C2H_CLASS_RUA,
RTW89_PHY_C2H_CLASS_RA,
@@ -317,9 +329,6 @@ struct rtw89_nbi_reg_def {
struct rtw89_reg_def notch2_en;
};
-extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
-extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
-
static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev,
u32 addr, u8 data)
{
@@ -377,6 +386,50 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask);
}
+static inline
+enum rtw89_gain_offset rtw89_subband_to_gain_offset_band_of_ofdm(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_GAIN_OFFSET_2G_OFDM;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_GAIN_OFFSET_5G_LOW;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_GAIN_OFFSET_5G_MID;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_GAIN_OFFSET_5G_HIGH;
+ }
+}
+
+static inline
+enum rtw89_phy_bb_gain_band rtw89_subband_to_bb_gain_band(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_BB_GAIN_BAND_2G;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_BB_GAIN_BAND_5G_L;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_BB_GAIN_BAND_5G_M;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_BB_GAIN_BAND_5G_H;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_BB_GAIN_BAND_6G_L;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_BB_GAIN_BAND_6G_M;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_BB_GAIN_BAND_6G_H;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_BB_GAIN_BAND_6G_UH;
+ }
+}
+
enum rtw89_rfk_flag {
RTW89_RFK_F_WRF = 0,
RTW89_RFK_F_WM = 1,
@@ -450,7 +503,7 @@ bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
-void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
enum rtw89_rf_path rf_path,
@@ -458,20 +511,24 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
+u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
+ enum rtw89_phy_idx phy_idx);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
-s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
- const struct rtw89_rate_desc *rate_desc);
-void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- struct rtw89_txpwr_limit *lmt,
- u8 ntx);
-void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx);
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch);
+void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
@@ -496,5 +553,7 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx mac_idx,
enum rtw89_tssi_bandedge_cfg bandedge_cfg);
+void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index bf41a1141679..40498812205e 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -59,7 +59,7 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_power_mode_change(rtwdev, enter);
}
-static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
if (rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
return;
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index 0feae3991623..6ac1f7ea5339 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -8,6 +8,7 @@
void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index ca20bb024b40..f2634062f377 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -34,6 +34,9 @@
#define R_AX_SYS_CLK_CTRL 0x0008
#define B_AX_CPU_CLK_EN BIT(14)
+#define R_AX_SYS_SWR_CTRL1 0x0010
+#define B_AX_SYM_CTRL_SPS_PWMFREQ BIT(10)
+
#define R_AX_SYS_ADIE_PAD_PWR_CTRL 0x0018
#define B_AX_SYM_PADPDN_WL_PTA_1P3 BIT(6)
#define B_AX_SYM_PADPDN_WL_RFC_1P3 BIT(5)
@@ -42,6 +45,9 @@
#define B_AX_R_DIS_PRST BIT(6)
#define B_AX_WLOCK_1C_BIT6 BIT(5)
+#define R_AX_AFE_LDO_CTRL 0x0020
+#define B_AX_AON_OFF_PC_EN BIT(23)
+
#define R_AX_EFUSE_CTRL_1 0x0038
#define B_AX_EF_PGPD_MASK GENMASK(30, 28)
#define B_AX_EF_RDT BIT(27)
@@ -118,6 +124,9 @@
#define B_AX_R_AX_BG_LPF BIT(2)
#define B_AX_R_AX_BG GENMASK(1, 0)
+#define R_AX_HCI_LDO_CTRL 0x007A
+#define B_AX_R_AX_VADJ_MASK GENMASK(3, 0)
+
#define R_AX_PLATFORM_ENABLE 0x0088
#define B_AX_AXIDMA_EN BIT(3)
#define B_AX_WCPU_EN BIT(1)
@@ -125,6 +134,7 @@
#define R_AX_WLLPS_CTRL 0x0090
#define B_AX_DIS_WLBT_LPSEN_LOPC BIT(1)
+#define SW_LPS_OPTION 0x0001A0B2
#define R_AX_SCOREBOARD 0x00AC
#define B_AX_TOGGLE BIT(31)
@@ -229,6 +239,13 @@
#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
+#define R_AX_EECS_EESK_FUNC_SEL 0x02D8
+#define B_AX_PINMUX_EESK_FUNC_SEL_MASK GENMASK(7, 4)
+
+#define R_AX_LED1_FUNC_SEL 0x02DC
+#define B_AX_PINMUX_EESK_FUNC_SEL_V1_MASK GENMASK(27, 24)
+#define PINMUX_EESK_FUNC_SEL_BT_LOG 0x1
+
#define R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN 0x02E4
#define B_AX_LED1_PULL_LOW_EN BIT(18)
#define B_AX_EESK_PULL_LOW_EN BIT(17)
@@ -249,6 +266,10 @@
#define B_AX_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2)
#define B_AX_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0)
+#define R_AX_SPS_DIG_OFF_CTRL0 0x0400
+#define B_AX_C3_L1_MASK GENMASK(5, 4)
+#define B_AX_C1_L1_MASK GENMASK(1, 0)
+
#define R_AX_AFE_OFF_CTRL1 0x0444
#define B_AX_S1_LDO_VSEL_F_MASK GENMASK(25, 24)
#define B_AX_S1_LDO2PWRCUT_F BIT(23)
@@ -305,8 +326,7 @@
#define R_AX_PCIE_DBG_CTRL 0x11C0
#define B_AX_DBG_DUMMY_MASK GENMASK(23, 16)
-#define B_AX_DBG_SEL_MASK GENMASK(15, 13)
-#define B_AX_PCIE_DBG_SEL BIT(12)
+#define B_AX_PCIE_DBG_SEL_MASK GENMASK(15, 13)
#define B_AX_MRD_TIMEOUT_EN BIT(10)
#define B_AX_ASFF_FULL_NO_STK BIT(1)
#define B_AX_EN_STUCK_DBG BIT(0)
@@ -445,6 +465,7 @@
#define B_AX_DISPATCHER_EN BIT(18)
#define B_AX_BBRPT_EN BIT(17)
#define B_AX_MAC_SEC_EN BIT(16)
+#define B_AX_DMACREG_GCKEN BIT(15)
#define B_AX_MAC_UN_EN BIT(15)
#define B_AX_H_AXIDMA_EN BIT(14)
@@ -523,6 +544,19 @@
#define B_AX_WDE_EMPTY_QUE_CMAC0_MBH BIT(1)
#define B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC BIT(0)
+#define R_AX_DLE_EMPTY1 0x8434
+#define B_AX_PLE_EMPTY_QTA_DMAC_WDRLS BIT(20)
+#define B_AX_PLE_EMPTY_QTA_CMAC1_DMA_BBRPT BIT(19)
+#define B_AX_PLE_EMPTY_QTA_CMAC1_DMA_RX BIT(18)
+#define B_AX_PLE_EMPTY_QTA_CMAC0_DMA_RX BIT(17)
+#define B_AX_PLE_EMPTY_QTA_DMAC_C2H BIT(16)
+#define B_AX_PLE_EMPTY_QUE_DMAC_PLRLS BIT(5)
+#define B_AX_PLE_EMPTY_QUE_DMAC_CPUIO BIT(4)
+#define B_AX_PLE_EMPTY_QUE_DMAC_SEC_RX BIT(3)
+#define B_AX_PLE_EMPTY_QUE_DMAC_MPDU_RX BIT(2)
+#define B_AX_PLE_EMPTY_QUE_DMAC_HDP BIT(1)
+#define B_AX_WDE_EMPTY_QUE_DMAC_WDRLS BIT(0)
+
#define R_AX_DMAC_ERR_IMR 0x8520
#define B_AX_DLE_CPUIO_ERR_INT_EN BIT(10)
#define B_AX_APB_BRIDGE_ERR_INT_EN BIT(9)
@@ -539,6 +573,10 @@
#define DMAC_ERR_IMR_DIS 0
#define R_AX_DMAC_ERR_ISR 0x8524
+#define B_AX_HAXIDMA_ERR_FLAG BIT(14)
+#define B_AX_PAXIDMA_ERR_FLAG BIT(13)
+#define B_AX_HCI_BUF_ERR_FLAG BIT(12)
+#define B_AX_BBRPT_ERR_FLAG BIT(11)
#define B_AX_DLE_CPUIO_ERR_FLAG BIT(10)
#define B_AX_APB_BRIDGE_ERR_FLAG BIT(9)
#define B_AX_DISPATCH_ERR_FLAG BIT(8)
@@ -917,6 +955,14 @@
B_AX_STF_OQT_OVERFLOW_ERR_INT_EN | \
B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN)
+#define R_AX_DISPATCHER_DBG_PORT 0x8860
+#define B_AX_DISPATCHER_DBG_SEL_MASK GENMASK(11, 8)
+#define B_AX_DISPATCHER_INTN_SEL_MASK GENMASK(7, 4)
+#define B_AX_DISPATCHER_CH_SEL_MASK GENMASK(3, 0)
+
+#define R_AX_RX_FUNCTION_STOP 0x8920
+#define B_AX_HDR_RX_STOP BIT(0)
+
#define R_AX_HCI_FC_CTRL 0x8A00
#define B_AX_HCI_FC_CH12_FULL_COND_MASK GENMASK(11, 10)
#define B_AX_HCI_FC_WP_CH811_FULL_COND_MASK GENMASK(9, 8)
@@ -998,7 +1044,13 @@
#define R_AX_WDE_ERRFLAG_MSG 0x8C30
#define B_AX_WDE_ERR_FLAG_MSG_MASK GENMASK(31, 0)
-#define R_AX_WDE_ERR_FLAG_CFG 0x8C34
+#define R_AX_WDE_ERR_FLAG_CFG_NUM1 0x8C34
+#define B_AX_WDE_ERR_FLAG_NUM1_VLD BIT(31)
+#define B_AX_WDE_ERR_FLAG_NUM1_MSTIDX_MASK GENMASK(27, 24)
+#define B_AX_WDE_ERR_FLAG_NUM1_ISRIDX_MASK GENMASK(20, 16)
+#define B_AX_WDE_DATCHN_FRZTMR_MODE BIT(2)
+#define B_AX_WDE_QUEMGN_FRZTMR_MODE BIT(1)
+#define B_AX_WDE_BUFMGN_FRZTMR_MODE BIT(0)
#define R_AX_WDE_ERR_IMR 0x8C38
#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
@@ -1182,7 +1234,59 @@
#define B_AX_PLE_START_BOUND_MASK GENMASK(13, 8)
#define B_AX_PLE_PAGE_SEL_MASK GENMASK(1, 0)
#define B_AX_PLE_FREE_PAGE_NUM_MASK GENMASK(28, 16)
-#define R_AX_PLE_ERR_FLAG_CFG 0x9034
+
+#define R_AX_PLE_DBGERR_LOCKEN 0x9020
+#define B_AX_PLE_LOCKEN_DLEPIF07 BIT(7)
+#define B_AX_PLE_LOCKEN_DLEPIF06 BIT(6)
+#define B_AX_PLE_LOCKEN_DLEPIF05 BIT(5)
+#define B_AX_PLE_LOCKEN_DLEPIF04 BIT(4)
+#define B_AX_PLE_LOCKEN_DLEPIF03 BIT(3)
+#define B_AX_PLE_LOCKEN_DLEPIF02 BIT(2)
+#define B_AX_PLE_LOCKEN_DLEPIF01 BIT(1)
+#define B_AX_PLE_LOCKEN_DLEPIF00 BIT(0)
+
+#define R_AX_PLE_DBGERR_STS 0x9024
+#define B_AX_PLE_LOCKON_DLEPIF07 BIT(7)
+#define B_AX_PLE_LOCKON_DLEPIF06 BIT(6)
+#define B_AX_PLE_LOCKON_DLEPIF05 BIT(5)
+#define B_AX_PLE_LOCKON_DLEPIF04 BIT(4)
+#define B_AX_PLE_LOCKON_DLEPIF03 BIT(3)
+#define B_AX_PLE_LOCKON_DLEPIF02 BIT(2)
+#define B_AX_PLE_LOCKON_DLEPIF01 BIT(1)
+#define B_AX_PLE_LOCKON_DLEPIF00 BIT(0)
+
+#define R_AX_PLE_ERR_FLAG_CFG_NUM1 0x9034
+#define B_AX_PLE_ERR_FLAG_NUM1_VLD BIT(31)
+#define B_AX_PLE_ERR_FLAG_NUM1_MSTIDX_MASK GENMASK(27, 24)
+#define B_AX_PLE_ERR_FLAG_NUM1_ISRIDX_MASK GENMASK(20, 16)
+#define B_AX_PLE_DATCHN_FRZTMR_MODE BIT(2)
+#define B_AX_PLE_QUEMGN_FRZTMR_MODE BIT(1)
+#define B_AX_PLE_BUFMGN_FRZTMR_MODE BIT(0)
+
+#define R_AX_PLE_ERRFLAG_MSG 0x9030
+#define B_AX_PLE_ERR_FLAG_MSG_MASK GENMASK(31, 0)
+#define B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
+#define B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
+#define B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 BIT(9)
+#define B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 BIT(8)
+#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 BIT(7)
+#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 BIT(6)
+#define B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 BIT(5)
+#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 BIT(4)
+#define B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 BIT(3)
+#define B_AX_PLE_BUFREQ_SIZELMT_INT_EN BIT(2)
+#define B_AX_PLE_BUFREQ_SIZE0_INT_EN BIT(1)
+#define B_AX_PLE_DATCHN_CAMREQ_ERR BIT(29)
+#define B_AX_PLE_DATCHN_ADRERR_ERR BIT(28)
+#define B_AX_PLE_BUFMGN_FRZTO_ERR_V1 BIT(9)
+#define B_AX_PLE_GETNPG_PGOFST_ERR_V1 BIT(8)
+#define B_AX_PLE_GETNPG_STRPG_ERR_V1 BIT(7)
+#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_V1 BIT(6)
+#define B_AX_PLE_BUFRTN_SIZE_ERR_V1 BIT(5)
+#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_V1 BIT(4)
+#define B_AX_PLE_BUFREQ_UNAVAL_ERR_V1 BIT(3)
+#define B_AX_PLE_BUFREQ_SIZELMT_ERR BIT(2)
+#define B_AX_PLE_BUFREQ_SIZE0_ERR BIT(1)
#define R_AX_PLE_ERR_IMR 0x9038
#define B_AX_PLE_DATCHN_RRDY_ERR_INT_EN BIT(27)
@@ -1393,6 +1497,19 @@
#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR BIT(16)
#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN BIT(0)
+#define R_AX_BBRPT_COM_ERR_ISR 0x960C
+#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_V1 BIT(0)
+
+#define R_AX_BBRPT_CHINFO_ERR_ISR 0x962C
+#define B_AX_BBPRT_CHIF_TO_ERR_V1 BIT(7)
+#define B_AX_BBPRT_CHIF_NULL_ERR_V1 BIT(6)
+#define B_AX_BBPRT_CHIF_LEFT2_ERR_V1 BIT(5)
+#define B_AX_BBPRT_CHIF_LEFT1_ERR_V1 BIT(4)
+#define B_AX_BBPRT_CHIF_HDRL_ERR_V1 BIT(3)
+#define B_AX_BBPRT_CHIF_BOVF_ERR_V1 BIT(2)
+#define B_AX_BBPRT_CHIF_OVF_ERR_V1 BIT(1)
+#define B_AX_BBPRT_CHIF_BB_TO_ERR_V1 BIT(0)
+
#define R_AX_BBRPT_CHINFO_ERR_IMR 0x9628
#define B_AX_BBPRT_CHIF_TO_ERR_INT_EN BIT(7)
#define B_AX_BBPRT_CHIF_NULL_ERR_INT_EN BIT(6)
@@ -1444,6 +1561,9 @@
#define B_AX_BBRPT_DFS_TO_ERR BIT(16)
#define B_AX_BBRPT_DFS_TO_ERR_INT_EN BIT(0)
+#define R_AX_BBRPT_DFS_ERR_ISR 0x963C
+#define B_AX_BBRPT_DFS_TO_ERR_V1 BIT(0)
+
#define R_AX_LA_ERRFLAG 0x966C
#define B_AX_LA_ISR_DATA_LOSS_ERR BIT(16)
#define B_AX_LA_IMR_DATA_LOSS_ERR BIT(0)
@@ -1535,6 +1655,8 @@
#define R_AX_ACTION_FWD0 0x9C04
#define TRXCFG_MPDU_PROC_ACT_FRWD 0x02A95A95
+#define R_AX_ACTION_FWD1 0x9C08
+
#define R_AX_TF_FWD 0x9C14
#define TRXCFG_MPDU_PROC_TF_FRWD 0x0000AA55
@@ -1546,6 +1668,9 @@
#define R_AX_CUT_AMSDU_CTRL 0x9C40
#define TRXCFG_MPDU_PROC_CUT_CTRL 0x010E05F0
+#define R_AX_WOW_CTRL 0x9C50
+#define B_AX_WOW_WOWEN BIT(1)
+
#define R_AX_MPDU_RX_ERR_ISR 0x9CF0
#define R_AX_MPDU_RX_ERR_IMR 0x9CF4
#define B_AX_RPT_ERR_INT_EN BIT(3)
@@ -1554,6 +1679,7 @@
#define B_AX_MPDU_RX_IMR_SET_V1 B_AX_RPT_ERR_INT_EN
#define R_AX_SEC_ENG_CTRL 0x9D00
+#define B_AX_SEC_DBG_PORT_FIELD_MASK GENMASK(19, 16)
#define B_AX_TX_PARTIAL_MODE BIT(11)
#define B_AX_CLK_EN_CGCMP BIT(10)
#define B_AX_CLK_EN_WAPI BIT(9)
@@ -1583,12 +1709,21 @@
#define R_AX_SEC_TX_DEBUG 0x9D20
#define R_AX_SEC_RX_DEBUG 0x9D24
#define R_AX_SEC_TRX_PKT_CNT 0x9D28
+
+#define R_AX_SEC_DEBUG2 0x9D28
+#define B_AX_DBG_READ_SH 2
+#define B_AX_DBG_READ_MSK 0x3fffffff
+
#define R_AX_SEC_TRX_BLK_CNT 0x9D2C
#define R_AX_SEC_ERROR_FLAG_IMR 0x9D2C
#define B_AX_RX_HANG_IMR BIT(1)
#define B_AX_TX_HANG_IMR BIT(0)
+#define R_AX_SEC_ERROR_FLAG 0x9D30
+#define B_AX_RX_HANG_ERROR_V1 BIT(1)
+#define B_AX_TX_HANG_ERROR_V1 BIT(0)
+
#define R_AX_SS_CTRL 0x9E10
#define B_AX_SS_INIT_DONE_1 BIT(31)
#define B_AX_SS_WARM_INIT_FLG BIT(29)
@@ -1723,6 +1858,28 @@
B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR | \
B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG)
+#define R_AX_TXPKTCTL_B0_ERRFLAG_ISR 0x9F7C
+#define B_AX_B0_ISR_ERR_PRELD_EVT3 BIT(23)
+#define B_AX_B0_ISR_ERR_PRELD_EVT2 BIT(22)
+#define B_AX_B0_ISR_ERR_PRELD_ENTNUMCFG BIT(21)
+#define B_AX_B0_ISR_ERR_PRELD_RLSPKTSZERR BIT(20)
+#define B_AX_B0_ISR_ERR_MPDUIF_ERR1 BIT(19)
+#define B_AX_B0_ISR_ERR_MPDUIF_DATAERR BIT(18)
+#define B_AX_B0_ISR_ERR_MPDUINFO_ERR1 BIT(17)
+#define B_AX_B0_ISR_ERR_MPDUINFO_RECFG BIT(16)
+#define B_AX_B0_ISR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_AX_B0_ISR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_AX_B0_ISR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_AX_B0_ISR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_AX_B0_ISR_ERR_USRCTL_EVT7 BIT(7)
+#define B_AX_B0_ISR_ERR_USRCTL_EVT6 BIT(6)
+#define B_AX_B0_ISR_ERR_USRCTL_EVT5 BIT(5)
+#define B_AX_B0_ISR_ERR_USRCTL_EVT4 BIT(4)
+#define B_AX_B0_ISR_ERR_USRCTL_RLSBMPLEN BIT(3)
+#define B_AX_B0_ISR_ERR_USRCTL_RDNRLSCMD BIT(2)
+#define B_AX_B0_ISR_ERR_USRCTL_NOINIT BIT(1)
+#define B_AX_B0_ISR_ERR_USRCTL_REINIT BIT(0)
+
#define R_AX_TXPKTCTL_B1_PRELD_CFG0 0x9F88
#define B_AX_B1_PRELD_FEN BIT(31)
#define B_AX_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
@@ -1770,6 +1927,28 @@
B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR | \
B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG)
+#define R_AX_TXPKTCTL_B1_ERRFLAG_ISR 0x9FBC
+#define B_AX_B1_ISR_ERR_PRELD_EVT3 BIT(23)
+#define B_AX_B1_ISR_ERR_PRELD_EVT2 BIT(22)
+#define B_AX_B1_ISR_ERR_PRELD_ENTNUMCFG BIT(21)
+#define B_AX_B1_ISR_ERR_PRELD_RLSPKTSZERR BIT(20)
+#define B_AX_B1_ISR_ERR_MPDUIF_ERR1 BIT(19)
+#define B_AX_B1_ISR_ERR_MPDUIF_DATAERR BIT(18)
+#define B_AX_B1_ISR_ERR_MPDUINFO_ERR1 BIT(17)
+#define B_AX_B1_ISR_ERR_MPDUINFO_RECFG BIT(16)
+#define B_AX_B1_ISR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_AX_B1_ISR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_AX_B1_ISR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_AX_B1_ISR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_AX_B1_ISR_ERR_USRCTL_EVT7 BIT(7)
+#define B_AX_B1_ISR_ERR_USRCTL_EVT6 BIT(6)
+#define B_AX_B1_ISR_ERR_USRCTL_EVT5 BIT(5)
+#define B_AX_B1_ISR_ERR_USRCTL_EVT4 BIT(4)
+#define B_AX_B1_ISR_ERR_USRCTL_RLSBMPLEN BIT(3)
+#define B_AX_B1_ISR_ERR_USRCTL_RDNRLSCMD BIT(2)
+#define B_AX_B1_ISR_ERR_USRCTL_NOINIT BIT(1)
+#define B_AX_B1_ISR_ERR_USRCTL_REINIT BIT(0)
+
#define R_AX_AFE_CTRL1 0x0024
#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
@@ -2338,6 +2517,41 @@
#define B_AX_DLE_IMR_SET (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \
B_AX_RXDATA_FSM_HANG_ERROR_IMR)
+#define R_AX_RX_ERR_FLAG 0xC800
+#define R_AX_RX_ERR_FLAG_C1 0xE800
+#define B_AX_RX_GET_NO_PAGE_ERR BIT(31)
+#define B_AX_RX_GET_NULL_PKT_ERR BIT(30)
+#define B_AX_RX_RU0_FSM_HANG_ERR BIT(29)
+#define B_AX_RX_RU1_FSM_HANG_ERR BIT(28)
+#define B_AX_RX_RU2_FSM_HANG_ERR BIT(27)
+#define B_AX_RX_RU3_FSM_HANG_ERR BIT(26)
+#define B_AX_RX_RU4_FSM_HANG_ERR BIT(25)
+#define B_AX_RX_RU5_FSM_HANG_ERR BIT(24)
+#define B_AX_RX_RU6_FSM_HANG_ERR BIT(23)
+#define B_AX_RX_RU7_FSM_HANG_ERR BIT(22)
+#define B_AX_RX_RXSTS_FSM_HANG_ERR BIT(21)
+#define B_AX_RX_CSI_FSM_HANG_ERR BIT(20)
+#define B_AX_RX_TXRPT_FSM_HANG_ERR BIT(19)
+#define B_AX_RX_F2PCMD_FSM_HANG_ERR BIT(18)
+#define B_AX_RX_RU0_ZERO_LEN_ERR BIT(17)
+#define B_AX_RX_RU1_ZERO_LEN_ERR BIT(16)
+#define B_AX_RX_RU2_ZERO_LEN_ERR BIT(15)
+#define B_AX_RX_RU3_ZERO_LEN_ERR BIT(14)
+#define B_AX_RX_RU4_ZERO_LEN_ERR BIT(13)
+#define B_AX_RX_RU5_ZERO_LEN_ERR BIT(12)
+#define B_AX_RX_RU6_ZERO_LEN_ERR BIT(11)
+#define B_AX_RX_RU7_ZERO_LEN_ERR BIT(10)
+#define B_AX_RX_RXSTS_ZERO_LEN_ERR BIT(9)
+#define B_AX_RX_CSI_ZERO_LEN_ERR BIT(8)
+#define B_AX_PLE_DATA_OPT_FSM_HANG BIT(7)
+#define B_AX_PLE_RXDATA_REQ_BUF_FSM_HANG BIT(6)
+#define B_AX_PLE_TXRPT_REQ_BUF_FSM_HANG BIT(5)
+#define B_AX_PLE_WD_OPT_FSM_HANG BIT(4)
+#define B_AX_PLE_ENQ_FSM_HANG BIT(3)
+#define B_AX_RXDATA_ENQUE_ORDER_ERR BIT(2)
+#define B_AX_RXSTS_ENQUE_ORDER_ERR BIT(1)
+#define B_AX_RX_CSI_PKT_NUM_ERR BIT(0)
+
#define R_AX_RXDMA_CTRL_0 0xC804
#define R_AX_RXDMA_CTRL_0_C1 0xE804
#define B_AX_RXDMA_DBGOUT_EN BIT(31)
@@ -2360,6 +2574,49 @@
B_AX_RU2_PTR_FULL_MODE | B_AX_RU3_PTR_FULL_MODE | \
B_AX_CSI_PTR_FULL_MODE | B_AX_RXSTS_PTR_FULL_MODE)
+#define R_AX_RX_CTRL0 0xC808
+#define R_AX_RX_CTRL0_C1 0xE808
+#define B_AX_DLE_CLOCK_FORCE_V1 BIT(31)
+#define B_AX_TXDMA_CLOCK_FORCE_V1 BIT(30)
+#define B_AX_RXDMA_CLOCK_FORCE_V1 BIT(29)
+#define B_AX_RXDMA_DEFAULT_PAGE_V1_MASK GENMASK(28, 24)
+#define B_AX_RXDMA_CSI_TGT_QUEID_MASK GENMASK(23, 18)
+#define B_AX_RXDMA_CSI_TGT_PRID_MASK GENMASK(17, 15)
+#define B_AX_RXDMA_DIS_CSI_RELEASE_V1 BIT(14)
+#define B_AX_CSI_PTR_FULL_MODE_V1 BIT(13)
+#define B_AX_RXDATA_PTR_FULL_MODE BIT(12)
+#define B_AX_RXSTS_PTR_FULL_MODE_V1 BIT(11)
+#define B_AX_TXRPT_FULL_RSV_DEPTH_V1_MASK GENMASK(10, 8)
+#define B_AX_RXDATA_FULL_RSV_DEPTH_MASK GENMASK(7, 5)
+#define B_AX_RXSTS_FULL_RSV_DEPTH_V1_MASK GENMASK(4, 2)
+#define B_AX_ORDER_FIFO_MASK GENMASK(1, 0)
+
+#define R_AX_RX_CTRL1 0xC80C
+#define R_AX_RX_CTRL1_C1 0xE80C
+#define B_AX_RXDMA_TXRPT_QUEUE_ID_SW_EN BIT(31)
+#define B_AX_RXDMA_TXRPT_QUEUE_ID_SW_V1_MASK GENMASK(30, 25)
+#define B_AX_RXDMA_F2PCMD_QUEUE_ID_SW_EN BIT(24)
+#define B_AX_RXDMA_F2PCMD_QUEUE_ID_SW_V1_MASK GENMASK(23, 18)
+#define B_AX_RXDMA_TXRPT_QUEUE_ID_TGT_SW_EN BIT(17)
+#define B_AX_RXDMA_TXRPT_QUEUE_ID_TGT_SW_1_MASK GENMASK(16, 11)
+#define B_AX_RXDMA_F2PCMD_QUEUE_ID_TGT_SW_EN BIT(10)
+#define B_AX_RXDMA_F2PCMD_QUEUE_ID_TGT_SW_1_MASK GENMASK(9, 4)
+#define B_AX_ORDER_FIFO_OUT BIT(3)
+#define B_AX_ORDER_FIFO_EMPTY BIT(2)
+#define B_AX_DBG_SEL_MASK GENMASK(1, 0)
+
+#define R_AX_RX_CTRL2 0xC810
+#define R_AX_RX_CTRL2_C1 0xE810
+#define B_AX_DLE_WDE_STATE_V1_MASK GENMASK(31, 30)
+#define B_AX_DLE_PLE_STATE_V1_MASK GENMASK(29, 28)
+#define B_AX_DLE_REQ_BUF_STATE_MASK GENMASK(27, 26)
+#define B_AX_DLE_ENQ_STATE_V1 BIT(25)
+#define B_AX_RX_DBG_SEL_MASK GENMASK(24, 19)
+#define B_AX_MACRX_CS_MASK GENMASK(18, 14)
+#define B_AX_RXSTS_CS_MASK GENMASK(13, 9)
+#define B_AX_ERR_INDICATOR BIT(5)
+#define B_AX_TXRPT_CS_MASK GENMASK(4, 0)
+
#define R_AX_RXDMA_PKT_INFO_0 0xC814
#define R_AX_RXDMA_PKT_INFO_1 0xC818
#define R_AX_RXDMA_PKT_INFO_2 0xC81C
@@ -2667,6 +2924,18 @@
B_AX_TMAC_MIMO_CTRL | \
B_AX_RMAC_FTM)
+#define R_AX_TRXPTCL_ERROR_INDICA 0xCCC0
+#define R_AX_TRXPTCL_ERROR_INDICA_C1 0xECC0
+#define B_AX_FTM_ERROR_FLAG_CLR BIT(8)
+#define B_AX_CSI_ERROR_FLAG_CLR BIT(7)
+#define B_AX_MIMOCTRL_ERROR_FLAG_CLR BIT(6)
+#define B_AX_RXTB_ERROR_FLAG_CLR BIT(5)
+#define B_AX_HWSIGB_GEN_ERROR_FLAG_CLR BIT(4)
+#define B_AX_TXPLCP_ERROR_FLAG_CLR BIT(3)
+#define B_AX_RESP_ERROR_FLAG_CLR BIT(2)
+#define B_AX_TXCTL_ERROR_FLAG_CLR BIT(1)
+#define B_AX_MACTX_ERROR_FLAG_CLR BIT(0)
+
#define R_AX_WMAC_TX_TF_INFO_0 0xCCD0
#define R_AX_WMAC_TX_TF_INFO_0_C1 0xECD0
#define B_AX_WMAC_TX_TF_INFO_SEL_MASK GENMASK(2, 0)
@@ -2991,6 +3260,7 @@
#define R_AX_PWR_RATE_CTRL 0xD200
#define R_AX_PWR_RATE_CTRL_C1 0xF200
+#define B_AX_PWR_REF GENMASK(27, 10)
#define B_AX_FORCE_PWR_BY_RATE_EN BIT(9)
#define B_AX_FORCE_PWR_BY_RATE_VALUE_MASK GENMASK(8, 0)
@@ -3128,6 +3398,7 @@
#define BTC_BREAK_PARAM 0xf0ffffff
#define R_BTC_BT_COEX_MSK_TABLE 0xDA30
+#define B_BTC_PRI_MASK_RXCCK_V1 BIT(28)
#define B_BTC_PRI_MASK_TX_RESP_V1 BIT(3)
#define R_AX_BT_COEX_CFG_2 0xDA34
@@ -3271,8 +3542,10 @@
#define RR_MOD_IQK GENMASK(19, 4)
#define RR_MOD_DPK GENMASK(19, 5)
#define RR_MOD_MASK GENMASK(19, 16)
+#define RR_MOD_RGM GENMASK(13, 4)
#define RR_MOD_V_DOWN 0x0
#define RR_MOD_V_STANDBY 0x1
+#define RR_TXAGC 0x10001
#define RR_MOD_V_TX 0x2
#define RR_MOD_V_RX 0x3
#define RR_MOD_V_TXIQK 0x4
@@ -3308,6 +3581,10 @@
#define CFGCH_BAND1_2G 0
#define CFGCH_BAND1_5G 1
#define CFGCH_BAND1_6G 3
+#define RR_CFGCH_POW_LCK BIT(15)
+#define RR_CFGCH_TRX_AH BIT(14)
+#define RR_CFGCH_BCN BIT(13)
+#define RR_CFGCH_BW2 BIT(12)
#define RR_CFGCH_BAND0 GENMASK(9, 8)
#define CFGCH_BAND0_2G 0
#define CFGCH_BAND0_5G 1
@@ -3340,6 +3617,7 @@
#define RR_RXK_PLLEN BIT(5)
#define RR_LUTWA 0x33
#define RR_LUTWA_MASK GENMASK(9, 0)
+#define RR_LUTWA_M1 GENMASK(7, 0)
#define RR_LUTWA_M2 GENMASK(4, 0)
#define RR_LUTWD1 0x3e
#define RR_LUTWD0 0x3f
@@ -3359,6 +3637,8 @@
#define RR_TXGA_TRK_EN BIT(7)
#define RR_TXGA_LOK_EXT GENMASK(4, 0)
#define RR_TXGA_LOK_EN BIT(0)
+#define RR_TXGA_V1 0x10055
+#define RR_TXGA_V1_TRK_EN BIT(7)
#define RR_GAINTX 0x56
#define RR_GAINTX_ALL GENMASK(15, 0)
#define RR_GAINTX_PAD GENMASK(9, 5)
@@ -3387,6 +3667,8 @@
#define RR_TXA2_LDO GENMASK(19, 16)
#define RR_TRXIQ 0x66
#define RR_RSV6 0x6d
+#define RR_TXVBUF 0x7c
+#define RR_TXVBUF_DACEN BIT(5)
#define RR_TXPOW 0x7f
#define RR_TXPOW_TXA BIT(8)
#define RR_TXPOW_TXAS BIT(7)
@@ -3397,6 +3679,7 @@
#define RR_RXBB_VOBUF GENMASK(15, 12)
#define RR_RXBB_C2G GENMASK(16, 10)
#define RR_RXBB_C1G GENMASK(9, 8)
+#define RR_RXBB_FATT GENMASK(7, 0)
#define RR_RXBB_ATTR GENMASK(7, 4)
#define RR_RXBB_ATTC GENMASK(2, 0)
#define RR_RXG 0x84
@@ -3407,10 +3690,14 @@
#define RR_RXAE_IQKMOD GENMASK(3, 0)
#define RR_RXA 0x8a
#define RR_RXA_DPK GENMASK(9, 8)
+#define RR_RXA_LNA 0x8b
#define RR_RXA2 0x8c
+#define RR_RAA2_SWATT GENMASK(15, 9)
#define RR_RXA2_C1 GENMASK(12, 10)
#define RR_RXA2_C2 GENMASK(9, 3)
+#define RR_RXA2_CC2 GENMASK(8, 7)
#define RR_RXA2_IATT GENMASK(7, 4)
+#define RR_RXA2_HATT GENMASK(6, 0)
#define RR_RXA2_ATT GENMASK(3, 0)
#define RR_RXIQGEN 0x8d
#define RR_RXIQGEN_ATTL GENMASK(12, 8)
@@ -3422,6 +3709,7 @@
#define RR_RXBB2_IDAC GENMASK(11, 9)
#define RR_RXBB2_EBW GENMASK(6, 5)
#define RR_XALNA2 0x90
+#define RR_XALNA2_SW2 GENMASK(9, 8)
#define RR_XALNA2_SW GENMASK(1, 0)
#define RR_DCK 0x92
#define RR_DCK_DONE GENMASK(7, 5)
@@ -3439,18 +3727,36 @@
#define RR_IQGEN_BIAS GENMASK(11, 8)
#define RR_TXIQK 0x98
#define RR_TXIQK_ATT2 GENMASK(15, 12)
+#define RR_TXIQK_ATT1 GENMASK(6, 0)
#define RR_TIA 0x9e
#define RR_TIA_N6 BIT(8)
#define RR_MIXER 0x9f
#define RR_MIXER_GN GENMASK(4, 3)
+#define RR_POW 0xa0
+#define RR_POW_SYN GENMASK(3, 2)
#define RR_LOGEN 0xa3
#define RR_LOGEN_RPT GENMASK(19, 16)
+#define RR_SX 0xaf
+#define RR_LDO 0xb1
+#define RR_LDO_SEL GENMASK(8, 6)
+#define RR_VCO 0xb2
+#define RR_LPF 0xb7
+#define RR_LPF_BUSY BIT(8)
#define RR_XTALX2 0xb8
#define RR_MALSEL 0xbe
+#define RR_SYNFB 0xc5
+#define RR_SYNFB_LK BIT(15)
+#define RR_LCKST 0xcf
+#define RR_LCKST_BIN BIT(0)
#define RR_LCK_TRG 0xd3
#define RR_LCK_TRGSEL BIT(8)
+#define RR_MMD 0xd5
+#define RR_MMD_RST_EN BIT(8)
+#define RR_MMD_RST_SYN BIT(6)
#define RR_IQKPLL 0xdc
#define RR_IQKPLL_MOD GENMASK(9, 8)
+#define RR_SYNLUT 0xdd
+#define RR_SYNLUT_MOD BIT(4)
#define RR_RCKD 0xde
#define RR_RCKD_POW GENMASK(19, 13)
#define RR_RCKD_BW BIT(2)
@@ -3479,11 +3785,14 @@
#define B_ANAPAR_ADCCLK BIT(30)
#define B_ANAPAR_FLTRST BIT(22)
#define B_ANAPAR_CRXBB GENMASK(18, 16)
+#define B_ANAPAR_EN BIT(16)
#define B_ANAPAR_14 GENMASK(15, 0)
#define R_RFE_E_A2 0x0334
#define R_RFE_O_SEL_A2 0x0338
#define R_RFE_SEL0_A2 0x033C
#define R_RFE_SEL32_A2 0x0340
+#define R_CIRST 0x035c
+#define B_CIRST_SYN GENMASK(11, 10)
#define R_SWSI_DATA_V1 0x0370
#define B_SWSI_DATA_VAL_V1 GENMASK(19, 0)
#define B_SWSI_DATA_ADDR_V1 GENMASK(27, 20)
@@ -3619,6 +3928,10 @@
#define R_P0_RFMODE 0x12AC
#define B_P0_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
#define B_P0_RFMODE_MUX GENMASK(11, 4)
+#define R_P0_RFMODE_ORI_RX 0x12AC
+#define B_P0_RFMODE_ORI_RX_ALL GENMASK(23, 12)
+#define R_P0_RFMODE_FTM_RX 0x12B0
+#define B_P0_RFMODE_FTM_RX GENMASK(11, 0)
#define R_P0_NRBW 0x12B8
#define B_P0_NRBW_DBG BIT(30)
#define R_S0_RXDC 0x12D4
@@ -3671,6 +3984,9 @@
#define B_TXAGC_TP GENMASK(2, 0)
#define R_TSSI_THER 0x1C10
#define B_TSSI_THER GENMASK(29, 24)
+#define R_TSSI_CWRPT 0x1C18
+#define B_TSSI_CWRPT_RDY BIT(16)
+#define B_TSSI_CWRPT GENMASK(8, 0)
#define R_TXAGC_BTP 0x1CA0
#define B_TXAGC_BTP GENMASK(31, 24)
#define R_TXAGC_BB 0x1C60
@@ -3712,6 +4028,8 @@
#define B_RXCCA_DIS_V1 BIT(0)
#define R_RXSC 0x237C
#define B_RXSC_EN BIT(0)
+#define R_RX_RPL_OFST 0x23AC
+#define B_RX_RPL_OFST_CCK_MASK GENMASK(6, 0)
#define R_RXSCOBC 0x23B0
#define B_RXSCOBC_TH GENMASK(18, 0)
#define R_RXSCOCCK 0x23B4
@@ -3725,9 +4043,18 @@
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_S1_HW_SI_DIS 0x3200
#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
+#define R_P1_RXCK 0x32A0
+#define B_P1_RXCK_BW3 BIT(30)
+#define B_P1_TXCK_ALL GENMASK(19, 12)
+#define B_P1_RXCK_ON BIT(19)
+#define B_P1_RXCK_VAL GENMASK(18, 16)
#define R_P1_RFMODE 0x32AC
#define B_P1_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
#define B_P1_RFMODE_MUX GENMASK(11, 4)
+#define R_P1_RFMODE_ORI_RX 0x32AC
+#define B_P1_RFMODE_ORI_RX_ALL GENMASK(23, 12)
+#define R_P1_RFMODE_FTM_RX 0x32B0
+#define B_P1_RFMODE_FTM_RX GENMASK(11, 0)
#define R_P1_DBGMOD 0x32B8
#define B_P1_DBGMOD_ON BIT(30)
#define R_S1_RXDC 0x32D4
@@ -3761,7 +4088,10 @@
#define R_T2F_GI_COMB 0x4424
#define B_T2F_GI_COMB_EN BIT(2)
#define R_BT_DYN_DC_EST_EN 0x441C
+#define R_BT_DYN_DC_EST_EN_V1 0x4420
#define B_BT_DYN_DC_EST_EN_MSK BIT(31)
+#define R_ASSIGN_SBD_OPT_V1 0x4440
+#define B_ASSIGN_SBD_OPT_EN_V1 BIT(31)
#define R_ASSIGN_SBD_OPT 0x4450
#define B_ASSIGN_SBD_OPT_EN BIT(24)
#define R_DCFO_COMP_S0 0x448C
@@ -3770,8 +4100,12 @@
#define B_DCFO_WEIGHT_MSK GENMASK(27, 24)
#define R_DCFO_OPT 0x4494
#define B_DCFO_OPT_EN BIT(29)
+#define B_TXSHAPE_TRIANGULAR_CFG GENMASK(25, 24)
#define R_BANDEDGE 0x4498
#define B_BANDEDGE_EN BIT(30)
+#define R_DPD_BF 0x44a0
+#define B_DPD_BF_OFDM GENMASK(16, 12)
+#define B_DPD_BF_SCA GENMASK(6, 0)
#define R_TXPATH_SEL 0x458C
#define B_TXPATH_SEL_MSK GENMASK(31, 28)
#define R_TXPWR 0x4594
@@ -3902,6 +4236,8 @@
#define R_P1_NBIIDX 0x4770
#define B_P1_NBIIDX_VAL GENMASK(11, 0)
#define B_P1_NBIIDX_NOTCH_EN BIT(12)
+#define R_PKT_CTRL 0x47D4
+#define B_PKT_POP_EN BIT(8)
#define R_SEG0R_PD 0x481C
#define R_SEG0R_PD_V1 0x4860
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30)
@@ -3910,20 +4246,42 @@
#define R_2P4G_BAND 0x4970
#define B_2P4G_BAND_SEL BIT(1)
#define R_FC0_BW 0x4974
-#define B_FC0_BW_INV GENMASK(6, 0)
+#define R_FC0_BW_V1 0x49C0
#define B_FC0_BW_SET GENMASK(31, 30)
#define B_ANT_RX_BT_SEG0 GENMASK(25, 22)
#define B_ANT_RX_1RCCA_SEG1 GENMASK(21, 18)
#define B_ANT_RX_1RCCA_SEG0 GENMASK(17, 14)
+#define B_FC0_BW_INV GENMASK(6, 0)
#define R_CHBW_MOD 0x4978
+#define R_CHBW_MOD_V1 0x49C4
#define B_BT_SHARE BIT(14)
#define B_CHBW_MOD_SBW GENMASK(13, 12)
#define B_CHBW_MOD_PRICH GENMASK(11, 8)
#define B_ANT_RX_SEG0 GENMASK(3, 0)
+#define R_P0_RPL1 0x49B0
+#define B_P0_RPL1_41_MASK GENMASK(31, 24)
+#define B_P0_RPL1_40_MASK GENMASK(23, 16)
+#define B_P0_RPL1_20_MASK GENMASK(15, 8)
+#define B_P0_RPL1_MASK (B_P0_RPL1_41_MASK | B_P0_RPL1_40_MASK | B_P0_RPL1_20_MASK)
+#define B_P0_RPL1_SHIFT 8
+#define B_P0_RPL1_BIAS_MASK GENMASK(7, 0)
+#define R_P0_RPL2 0x49B4
+#define B_P0_RTL2_8A_MASK GENMASK(31, 24)
+#define B_P0_RTL2_81_MASK GENMASK(23, 16)
+#define B_P0_RTL2_80_MASK GENMASK(15, 8)
+#define B_P0_RTL2_42_MASK GENMASK(7, 0)
+#define R_P0_RPL3 0x49B8
+#define B_P0_RTL3_89_MASK GENMASK(31, 24)
+#define B_P0_RTL3_84_MASK GENMASK(23, 16)
+#define B_P0_RTL3_83_MASK GENMASK(15, 8)
+#define B_P0_RTL3_82_MASK GENMASK(7, 0)
#define R_PD_BOOST_EN 0x49E8
#define B_PD_BOOST_EN BIT(7)
#define R_P1_BACKOFF_IBADC_V1 0x49F0
#define B_P1_BACKOFF_IBADC_V1 GENMASK(31, 26)
+#define R_P1_RPL1 0x4A00
+#define R_P1_RPL2 0x4A04
+#define R_P1_RPL3 0x4A08
#define R_BK_FC0_INV_V1 0x4A1C
#define B_BK_FC0_INV_MSK_V1 GENMASK(18, 0)
#define R_CCK_FC0_INV_V1 0x4A20
@@ -3934,8 +4292,10 @@
#define B_P1_AGC_EN BIT(31)
#define R_PATH1_TIA_INIT_V1 0x4AA8
#define B_PATH1_TIA_INIT_IDX_MSK_V1 BIT(9)
+#define R_P0_AGC_RSVD 0x4ACC
#define R_PATH0_RXBB_V1 0x4AD4
#define B_PATH0_RXBB_MSK_V1 GENMASK(31, 0)
+#define R_P1_AGC_RSVD 0x4AD8
#define R_PATH1_RXBB_V1 0x4AE0
#define B_PATH1_RXBB_MSK_V1 GENMASK(31, 0)
#define R_PATH0_BT_BACKOFF_V1 0x4AE4
@@ -3951,6 +4311,7 @@
#define B_PATH0_NOTCH2_EN BIT(12)
#define B_PATH0_NOTCH2_VAL GENMASK(11, 0)
#define R_PATH0_5MDET 0x4C4C
+#define R_PATH0_5MDET_V1 0x46F8
#define B_PATH0_5MDET_EN BIT(12)
#define B_PATH0_5MDET_SB2 BIT(8)
#define B_PATH0_5MDET_SB0 BIT(6)
@@ -3964,6 +4325,7 @@
#define B_PATH1_NOTCH2_EN BIT(12)
#define B_PATH1_NOTCH2_VAL GENMASK(11, 0)
#define R_PATH1_5MDET 0x4D10
+#define R_PATH1_5MDET_V1 0x47B8
#define B_PATH1_5MDET_EN BIT(12)
#define B_PATH1_5MDET_SB2 BIT(8)
#define B_PATH1_5MDET_SB0 BIT(6)
@@ -3992,6 +4354,20 @@
#define B_CFO_COMP_VALID_BIT BIT(29)
#define B_CFO_COMP_WEIGHT_MSK GENMASK(27, 24)
#define B_CFO_COMP_VAL_MSK GENMASK(11, 0)
+#define R_TSSI_PA_K1 0x5600
+#define R_TSSI_PA_K2 0x5604
+#define R_P0_TSSI_ALIM1 0x5630
+#define B_P0_TSSI_ALIM1 GENMASK(29, 0)
+#define B_P0_TSSI_ALIM11 GENMASK(29, 20)
+#define B_P0_TSSI_ALIM12 GENMASK(19, 10)
+#define B_P0_TSSI_ALIM13 GENMASK(9, 0)
+#define R_P0_TSSI_ALIM3 0x5634
+#define B_P0_TSSI_ALIM31 GENMASK(9, 0)
+#define R_TSSI_PA_K5 0x5638
+#define R_P0_TSSI_ALIM2 0x563c
+#define B_P0_TSSI_ALIM2 GENMASK(29, 0)
+#define R_P0_TSSI_ALIM4 0x5640
+#define R_TSSI_PA_K8 0x5644
#define R_UPD_CLK 0x5670
#define B_DAC_VAL BIT(31)
#define B_ACK_VAL GENMASK(30, 29)
@@ -4003,6 +4379,11 @@
#define B_TXPWRB_VAL GENMASK(27, 19)
#define R_DPD_OFT_EN 0x5800
#define B_DPD_OFT_EN BIT(28)
+#define B_DPD_TSSI_CW GENMASK(26, 18)
+#define B_DPD_PWR_CW GENMASK(17, 9)
+#define B_DPD_REF GENMASK(8, 0)
+#define R_P0_TSSIC 0x5814
+#define B_P0_TSSIC_BYPASS BIT(11)
#define R_DPD_OFT_ADDR 0x5804
#define B_DPD_OFT_ADDR GENMASK(31, 27)
#define R_TXPWRB_H 0x580c
@@ -4011,13 +4392,18 @@
#define B_P0_TMETER GENMASK(15, 10)
#define B_P0_TMETER_DIS BIT(16)
#define B_P0_TMETER_TRK BIT(24)
+#define R_P1_TSSIC 0x7814
+#define B_P1_TSSIC_BYPASS BIT(11)
#define R_P0_TSSI_TRK 0x5818
#define B_P0_TSSI_TRK_EN BIT(30)
+#define B_P0_TSSI_RFC GENMASK(28, 27)
#define B_P0_TSSI_OFT_EN BIT(28)
#define B_P0_TSSI_OFT GENMASK(7, 0)
#define R_P0_TSSI_AVG 0x5820
+#define B_P0_TSSI_EN BIT(31)
#define B_P0_TSSI_AVG GENMASK(15, 12)
#define R_P0_RFCTM 0x5864
+#define B_P0_RFCTM_EN BIT(29)
#define B_P0_RFCTM_VAL GENMASK(25, 20)
#define R_P0_RFCTM_RDY BIT(26)
#define R_P0_TRSW 0x5868
@@ -4030,13 +4416,16 @@
#define B_P0_RFM_TX_OPT BIT(6)
#define B_P0_RFM_BT_EN BIT(5)
#define B_P0_RFM_OUT GENMASK(4, 0)
+#define R_P0_PATH_RST 0x58AC
#define R_P0_TXDPD 0x58D4
#define B_P0_TXDPD GENMASK(31, 28)
#define R_P0_TXPW_RSTB 0x58DC
#define B_P0_TXPW_RSTB_MANON BIT(30)
#define B_P0_TXPW_RSTB_TSSI BIT(31)
#define R_P0_TSSI_MV_AVG 0x58E4
+#define B_P0_TSSI_MV_MIX GENMASK(19, 11)
#define B_P0_TSSI_MV_AVG GENMASK(13, 11)
+#define B_P0_TSSI_MV_CLR BIT(14)
#define R_TXGAIN_SCALE 0x58F0
#define B_TXGAIN_SCALE_EN BIT(19)
#define B_TXGAIN_SCALE_OFT GENMASK(31, 24)
@@ -4061,24 +4450,41 @@
#define B_S0_DACKQ8_K GENMASK(15, 8)
#define R_RPL_BIAS_COMP1 0x6DF0
#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
+#define R_P1_TSSI_ALIM1 0x7630
+#define B_P1_TSSI_ALIM1 GENMASK(29, 0)
+#define B_P1_TSSI_ALIM11 GENMASK(29, 20)
+#define B_P1_TSSI_ALIM12 GENMASK(19, 10)
+#define B_P1_TSSI_ALIM13 GENMASK(9, 0)
+#define R_P1_TSSI_ALIM3 0x7634
+#define B_P1_TSSI_ALIM31 GENMASK(9, 0)
+#define R_P1_TSSI_ALIM2 0x763c
+#define B_P1_TSSI_ALIM2 GENMASK(29, 0)
+#define R_P1_TSSIC 0x7814
+#define B_P1_TSSIC_BYPASS BIT(11)
#define R_P1_TMETER 0x7810
#define B_P1_TMETER GENMASK(15, 10)
#define B_P1_TMETER_DIS BIT(16)
#define B_P1_TMETER_TRK BIT(24)
#define R_P1_TSSI_TRK 0x7818
#define B_P1_TSSI_TRK_EN BIT(30)
+#define B_P1_TSSI_RFC GENMASK(28, 27)
#define B_P1_TSSI_OFT_EN BIT(28)
#define B_P1_TSSI_OFT GENMASK(7, 0)
#define R_P1_TSSI_AVG 0x7820
+#define B_P1_TSSI_EN BIT(31)
#define B_P1_TSSI_AVG GENMASK(15, 12)
#define R_P1_RFCTM 0x7864
#define R_P1_RFCTM_RDY BIT(26)
#define B_P1_RFCTM_VAL GENMASK(25, 20)
+#define B_P1_RFCTM_DEL GENMASK(19, 11)
+#define R_P1_PATH_RST 0x78AC
#define R_P1_TXPW_RSTB 0x78DC
#define B_P1_TXPW_RSTB_MANON BIT(30)
#define B_P1_TXPW_RSTB_TSSI BIT(31)
#define R_P1_TSSI_MV_AVG 0x78E4
+#define B_P1_TSSI_MV_MIX GENMASK(19, 11)
#define B_P1_TSSI_MV_AVG GENMASK(13, 11)
+#define B_P1_TSSI_MV_CLR BIT(14)
#define R_TSSI_THOF 0x7C00
#define R_S1_DACKI 0x7E00
#define B_S1_DACKI_AR GENMASK(31, 28)
@@ -4148,6 +4554,7 @@
#define B_KPATH_CFG_ED GENMASK(21, 20)
#define R_KIP_RPT1 0x80D4
#define B_KIP_RPT1_SEL GENMASK(21, 16)
+#define B_KIP_RPT1_SEL_V1 GENMASK(19, 16)
#define R_SRAM_IQRX 0x80D8
#define R_GAPK 0x80E0
#define B_GAPK_ADR BIT(0)
@@ -4169,12 +4576,14 @@
#define B_PRT_COM_GL GENMASK(7, 4)
#define B_PRT_COM_CORI GENMASK(7, 0)
#define B_PRT_COM_RXBB GENMASK(5, 0)
+#define B_PRT_COM_RXBB_V1 GENMASK(4, 0)
#define B_PRT_COM_DONE BIT(0)
#define R_COEF_SEL 0x8104
#define B_COEF_SEL_IQC BIT(0)
#define B_COEF_SEL_MDPD BIT(8)
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
+#define B_IQK_RES_K BIT(28)
#define B_IQK_RES_TXCFIR GENMASK(11, 8)
#define B_IQK_RES_RXCFIR GENMASK(3, 0)
#define R_TXIQC 0x8138
@@ -4206,13 +4615,18 @@
#define B_DPD_LBK BIT(7)
#define R_DPD_CH0 0x81AC
#define R_DPD_BND 0x81B4
+#define B_DPD_BND_1 GENMASK(24, 16)
+#define B_DPD_BND_0 GENMASK(8, 0)
#define R_DPD_CH0A 0x81BC
#define B_DPD_MEN GENMASK(31, 28)
#define B_DPD_ORDER GENMASK(26, 24)
+#define B_DPD_ORDER_V1 GENMASK(26, 25)
+#define B_DPD_CFG GENMASK(22, 0)
#define B_DPD_SEL GENMASK(13, 8)
#define R_TXAGC_RFK 0x81C4
#define B_TXAGC_RFK_CH0 GENMASK(5, 0)
#define R_DPD_COM 0x81C8
+#define B_DPD_COM_OF BIT(15)
#define R_KIP_IQP 0x81CC
#define B_KIP_IQP_SW GENMASK(13, 12)
#define B_KIP_IQP_IQSW GENMASK(5, 0)
@@ -4231,6 +4645,9 @@
#define B_RPT_PER_TSSI GENMASK(28, 16)
#define B_RPT_PER_OF GENMASK(15, 8)
#define B_RPT_PER_TH GENMASK(5, 0)
+#define R_IQRSN 0x8220
+#define B_IQRSN_K1 BIT(28)
+#define B_IQRSN_K2 BIT(16)
#define R_RXCFIR_P0C0 0x8D40
#define R_RXCFIR_P0C1 0x8D84
#define R_RXCFIR_P0C2 0x8DC8
@@ -4288,6 +4705,8 @@
#define B_DACK_S0P3_OK BIT(2)
#define R_DACK_DADCK01 0xC084
#define B_DACK_DADCK01 GENMASK(31, 24)
+#define R_DRCK_FH 0xC094
+#define B_DRCK_LAT BIT(9)
#define R_DRCK 0xC0C4
#define B_DRCK_IDLE BIT(9)
#define B_DRCK_EN BIT(6)
@@ -4295,15 +4714,29 @@
#define R_DRCK_RES 0xC0C8
#define B_DRCK_RES GENMASK(19, 15)
#define B_DRCK_POL BIT(3)
+#define R_DRCK_V1 0xC0CC
+#define B_DRCK_V1_SEL BIT(9)
+#define B_DRCK_V1_KICK BIT(6)
+#define B_DRCK_V1_CV GENMASK(4, 0)
+#define R_DRCK_RS 0xC0D0
+#define B_DRCK_RS_LPS GENMASK(19, 15)
+#define B_DRCK_RS_DONE BIT(3)
#define R_PATH0_SAMPL_DLY_T_V1 0xC0D4
#define B_PATH0_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26)
#define R_P0_CFCH_BW0 0xC0D4
#define B_P0_CFCH_BW0 GENMASK(27, 26)
#define R_P0_CFCH_BW1 0xC0D8
+#define B_P0_CFCH_EX BIT(13)
#define B_P0_CFCH_BW1 GENMASK(8, 5)
+#define R_ADDCK0D 0xC0F0
+#define B_ADDCK0D_VAL2 GENMASK(31, 26)
+#define B_ADDCK0D_VAL GENMASK(25, 16)
#define R_ADDCK0 0xC0F4
+#define B_ADDCK0_TRG BIT(11)
#define B_ADDCK0 GENMASK(9, 8)
+#define B_ADDCK0_MAN GENMASK(5, 4)
#define B_ADDCK0_EN BIT(4)
+#define B_ADDCK0_VAL GENMASK(3, 0)
#define B_ADDCK0_RST BIT(2)
#define R_ADDCK0_RL 0xC0F8
#define B_ADDCK0_RLS GENMASK(29, 28)
@@ -4343,9 +4776,15 @@
#define R_PATH0_BW_SEL_V1 0xC0D8
#define B_PATH0_BW_SEL_MSK_V1 GENMASK(8, 5)
#define R_PATH1_BW_SEL_V1 0xC1D8
+#define B_PATH1_BW_SEL_EX BIT(13)
#define B_PATH1_BW_SEL_MSK_V1 GENMASK(8, 5)
+#define R_ADDCK1D 0xC1F0
+#define B_ADDCK1D_VAL2 GENMASK(31, 26)
+#define B_ADDCK1D_VAL GENMASK(25, 16)
#define R_ADDCK1 0xC1F4
+#define B_ADDCK1_TRG BIT(11)
#define B_ADDCK1 GENMASK(9, 8)
+#define B_ADDCK1_MAN GENMASK(5, 4)
#define B_ADDCK1_EN BIT(4)
#define B_ADDCK1_RST BIT(2)
#define R_ADDCK1_RL 0xC1F8
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 784147680353..eff6519cf019 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -48,6 +48,10 @@ static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = {
&rtw89_mac_size.ple_size0, &rtw89_mac_size.wde_qt0,
&rtw89_mac_size.wde_qt0, &rtw89_mac_size.ple_qt4,
&rtw89_mac_size.ple_qt5},
+ [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size0,
+ &rtw89_mac_size.ple_size0, &rtw89_mac_size.wde_qt0,
+ &rtw89_mac_size.wde_qt0, &rtw89_mac_size.ple_qt4,
+ &rtw89_mac_size.ple_qt_52a_wow},
[RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4,
&rtw89_mac_size.ple_size4, &rtw89_mac_size.wde_qt4,
&rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
@@ -1410,151 +1414,14 @@ static void rtw8852a_set_txpwr_ref(struct rtw89_dev *rtwdev,
phy_idx);
}
-static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 band = chan->band_type;
- u8 ch = chan->channel;
- static const u8 rs[] = {
- RTW89_RS_CCK,
- RTW89_RS_OFDM,
- RTW89_RS_MCS,
- RTW89_RS_HEDCM,
- };
- s8 tmp;
- u8 i, j;
- u32 val, shf, addr = R_AX_PWR_BY_RATE;
- struct rtw89_rate_desc cur;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] set txpwr byrate with ch=%d\n", ch);
-
- for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
- for (i = 0; i < ARRAY_SIZE(rs); i++) {
- if (cur.nss >= rtw89_rs_nss_max[rs[i]])
- continue;
-
- val = 0;
- cur.rs = rs[i];
-
- for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
- cur.idx = j;
- shf = (j % 4) * 8;
- tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
- &cur);
- val |= (tmp << shf);
-
- if ((j + 1) % 4)
- continue;
-
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
- val = 0;
- addr += 4;
- }
- }
- }
-}
-
-static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 band = chan->band_type;
- struct rtw89_rate_desc desc = {
- .nss = RTW89_NSS_1,
- .rs = RTW89_RS_OFFSET,
- };
- u32 val = 0;
- s8 v;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
-
- for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
- v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
- val |= ((v & 0xf) << (4 * desc.idx));
- }
-
- rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
- GENMASK(19, 0), val);
-}
-
-static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
-#define __MAC_TXPWR_LMT_PAGE_SIZE 40
- u8 ch = chan->channel;
- u8 bw = chan->band_width;
- struct rtw89_txpwr_limit lmt[NTX_NUM_8852A];
- u32 addr, val;
- const s8 *ptr;
- u8 i, j;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
-
- for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
-
- for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
- addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
- ptr = (s8 *)&lmt[i] + j;
-
- val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
- FIELD_PREP(GENMASK(15, 8), ptr[1]) |
- FIELD_PREP(GENMASK(23, 16), ptr[2]) |
- FIELD_PREP(GENMASK(31, 24), ptr[3]);
-
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
- }
- }
-#undef __MAC_TXPWR_LMT_PAGE_SIZE
-}
-
-static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
-#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
- u8 ch = chan->channel;
- u8 bw = chan->band_width;
- struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852A];
- u32 addr, val;
- const s8 *ptr;
- u8 i, j;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
-
- for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
-
- for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
- addr = R_AX_PWR_RU_LMT + j +
- __MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
- ptr = (s8 *)&lmt_ru[i] + j;
-
- val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
- FIELD_PREP(GENMASK(15, 8), ptr[1]) |
- FIELD_PREP(GENMASK(23, 16), ptr[2]) |
- FIELD_PREP(GENMASK(31, 24), ptr[3]);
-
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
- }
- }
-
-#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
-}
-
static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_txpwr_byrate(rtwdev, chan, phy_idx);
- rtw8852a_set_txpwr_offset(rtwdev, chan, phy_idx);
- rtw8852a_set_txpwr_limit(rtwdev, chan, phy_idx);
- rtw8852a_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -2008,19 +1875,6 @@ static struct rtw89_btc_fbtc_mreg rtw89_btc_8852a_mon_reg[] = {
};
static
-void rtw8852a_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- struct rtw89_btc_bt_link_info *b = &bt->link_info;
-
- /* fix LNA2 = level-5 for BT ACI issue at BTG */
- if (btc->dm.wl_btg_rx && b->profile_cnt.now != 0)
- dm->trx_para_level = 1;
-}
-
-static
void rtw8852a_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -2136,6 +1990,15 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
rtw8852a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8852a = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = RTW89_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW89_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+};
+#endif
+
static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.enable_bb_rf = rtw89_mac_enable_bb_rf,
.disable_bb_rf = rtw89_mac_disable_bb_rf,
@@ -2178,7 +2041,6 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.btc_set_wl_pri = rtw8852a_btc_set_wl_pri,
.btc_set_wl_txpwr_ctrl = rtw8852a_btc_set_wl_txpwr_ctrl,
.btc_get_bt_rssi = rtw8852a_btc_get_bt_rssi,
- .btc_bt_aci_imp = rtw8852a_btc_bt_aci_imp,
.btc_update_bt_cnt = rtw8852a_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852a_btc_wl_s1_standby,
.btc_set_wl_rx_gain = rtw8852a_btc_set_wl_rx_gain,
@@ -2196,6 +2058,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = rtw8852a_hfc_param_ini_pcie,
.dle_mem = rtw8852a_dle_mem_pcie,
+ .wde_qempty_acq_num = 16,
+ .wde_qempty_mgq_sel = 16,
.rf_base_addr = {0xc000, 0xd000},
.pwr_on_seq = pwr_on_seq_8852a,
.pwr_off_seq = pwr_off_seq_8852a,
@@ -2218,6 +2082,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bw160 = false,
+ .support_ul_tb_ctrl = false,
.hw_sec_hdr = false,
.rf_path_num = 2,
.tx_nss = 2,
@@ -2279,11 +2144,15 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.c2h_ctrl_reg = R_AX_C2HREG_CTRL,
.c2h_regs = rtw8852a_c2h_regs,
.page_regs = &rtw8852a_page_regs,
+ .cfo_src_fd = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 3,
.imr_info = &rtw8852a_imr_info,
.rrsr_cfgs = &rtw8852a_rrsr_cfgs,
.dma_ch_mask = 0,
+#ifdef CONFIG_PM
+ .wowlan_stub = &rtw_wowlan_stub_8852a,
+#endif
};
EXPORT_SYMBOL(rtw8852a_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index fcff1194c009..ea82fed7b7be 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -8,7 +8,6 @@
#include "core.h"
#define RF_PATH_NUM_8852A 2
-#define NTX_NUM_8852A 2
enum rtw8852a_pmac_mode {
NONE_TEST,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 9f9908418ee4..b635ac1d1ca2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -2,9 +2,46 @@
/* Copyright(c) 2019-2022 Realtek Corporation
*/
-#include "core.h"
+#include "coex.h"
+#include "fw.h"
#include "mac.h"
+#include "phy.h"
#include "reg.h"
+#include "rtw8852b.h"
+#include "rtw8852b_rfk.h"
+#include "rtw8852b_table.h"
+#include "txrx.h"
+
+static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_pcie[] = {
+ {5, 343, grp_0}, /* ACH 0 */
+ {5, 343, grp_0}, /* ACH 1 */
+ {5, 343, grp_0}, /* ACH 2 */
+ {5, 343, grp_0}, /* ACH 3 */
+ {0, 0, grp_0}, /* ACH 4 */
+ {0, 0, grp_0}, /* ACH 5 */
+ {0, 0, grp_0}, /* ACH 6 */
+ {0, 0, grp_0}, /* ACH 7 */
+ {4, 344, grp_0}, /* B0MGQ */
+ {4, 344, grp_0}, /* B0HIQ */
+ {0, 0, grp_0}, /* B1MGQ */
+ {0, 0, grp_0}, /* B1HIQ */
+ {40, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_pcie = {
+ 448, /* Group 0 */
+ 0, /* Group 1 */
+ 448, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8852b_hfc_chcfg_pcie, &rtw8852b_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
[RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
@@ -19,6 +56,2275 @@ static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
NULL},
};
+static const struct rtw89_reg3_def rtw8852b_pmac_ht20_mcs7_tbl[] = {
+ {0x4580, 0x0000ffff, 0x0},
+ {0x4580, 0xffff0000, 0x0},
+ {0x4584, 0x0000ffff, 0x0},
+ {0x4584, 0xffff0000, 0x0},
+ {0x4580, 0x0000ffff, 0x1},
+ {0x4578, 0x00ffffff, 0x2018b},
+ {0x4570, 0x03ffffff, 0x7},
+ {0x4574, 0x03ffffff, 0x32407},
+ {0x45b8, 0x00000010, 0x0},
+ {0x45b8, 0x00000100, 0x0},
+ {0x45b8, 0x00000080, 0x0},
+ {0x45b8, 0x00000008, 0x0},
+ {0x45a0, 0x0000ff00, 0x0},
+ {0x45a0, 0xff000000, 0x1},
+ {0x45a4, 0x0000ff00, 0x2},
+ {0x45a4, 0xff000000, 0x3},
+ {0x45b8, 0x00000020, 0x0},
+ {0x4568, 0xe0000000, 0x0},
+ {0x45b8, 0x00000002, 0x1},
+ {0x456c, 0xe0000000, 0x0},
+ {0x45b4, 0x00006000, 0x0},
+ {0x45b4, 0x00001800, 0x1},
+ {0x45b8, 0x00000040, 0x0},
+ {0x45b8, 0x00000004, 0x0},
+ {0x45b8, 0x00000200, 0x0},
+ {0x4598, 0xf8000000, 0x0},
+ {0x45b8, 0x00100000, 0x0},
+ {0x45a8, 0x00000fc0, 0x0},
+ {0x45b8, 0x00200000, 0x0},
+ {0x45b0, 0x00000038, 0x0},
+ {0x45b0, 0x000001c0, 0x0},
+ {0x45a0, 0x000000ff, 0x0},
+ {0x45b8, 0x00400000, 0x0},
+ {0x4590, 0x000007ff, 0x0},
+ {0x45b0, 0x00000e00, 0x0},
+ {0x45ac, 0x0000001f, 0x0},
+ {0x45b8, 0x00800000, 0x0},
+ {0x45a8, 0x0003f000, 0x0},
+ {0x45b8, 0x01000000, 0x0},
+ {0x45b0, 0x00007000, 0x0},
+ {0x45b0, 0x00038000, 0x0},
+ {0x45a0, 0x00ff0000, 0x0},
+ {0x45b8, 0x02000000, 0x0},
+ {0x4590, 0x003ff800, 0x0},
+ {0x45b0, 0x001c0000, 0x0},
+ {0x45ac, 0x000003e0, 0x0},
+ {0x45b8, 0x04000000, 0x0},
+ {0x45a8, 0x00fc0000, 0x0},
+ {0x45b8, 0x08000000, 0x0},
+ {0x45b0, 0x00e00000, 0x0},
+ {0x45b0, 0x07000000, 0x0},
+ {0x45a4, 0x000000ff, 0x0},
+ {0x45b8, 0x10000000, 0x0},
+ {0x4594, 0x000007ff, 0x0},
+ {0x45b0, 0x38000000, 0x0},
+ {0x45ac, 0x00007c00, 0x0},
+ {0x45b8, 0x20000000, 0x0},
+ {0x45a8, 0x3f000000, 0x0},
+ {0x45b8, 0x40000000, 0x0},
+ {0x45b4, 0x00000007, 0x0},
+ {0x45b4, 0x00000038, 0x0},
+ {0x45a4, 0x00ff0000, 0x0},
+ {0x45b8, 0x80000000, 0x0},
+ {0x4594, 0x003ff800, 0x0},
+ {0x45b4, 0x000001c0, 0x0},
+ {0x4598, 0xf8000000, 0x0},
+ {0x45b8, 0x00100000, 0x0},
+ {0x45a8, 0x00000fc0, 0x7},
+ {0x45b8, 0x00200000, 0x0},
+ {0x45b0, 0x00000038, 0x0},
+ {0x45b0, 0x000001c0, 0x0},
+ {0x45a0, 0x000000ff, 0x0},
+ {0x45b4, 0x06000000, 0x0},
+ {0x45b0, 0x00000007, 0x0},
+ {0x45b8, 0x00080000, 0x0},
+ {0x45a8, 0x0000003f, 0x0},
+ {0x457c, 0xffe00000, 0x1},
+ {0x4530, 0xffffffff, 0x0},
+ {0x4588, 0x00003fff, 0x0},
+ {0x4598, 0x000001ff, 0x0},
+ {0x4534, 0xffffffff, 0x0},
+ {0x4538, 0xffffffff, 0x0},
+ {0x453c, 0xffffffff, 0x0},
+ {0x4588, 0x0fffc000, 0x0},
+ {0x4598, 0x0003fe00, 0x0},
+ {0x4540, 0xffffffff, 0x0},
+ {0x4544, 0xffffffff, 0x0},
+ {0x4548, 0xffffffff, 0x0},
+ {0x458c, 0x00003fff, 0x0},
+ {0x4598, 0x07fc0000, 0x0},
+ {0x454c, 0xffffffff, 0x0},
+ {0x4550, 0xffffffff, 0x0},
+ {0x4554, 0xffffffff, 0x0},
+ {0x458c, 0x0fffc000, 0x0},
+ {0x459c, 0x000001ff, 0x0},
+ {0x4558, 0xffffffff, 0x0},
+ {0x455c, 0xffffffff, 0x0},
+ {0x4530, 0xffffffff, 0x4e790001},
+ {0x4588, 0x00003fff, 0x0},
+ {0x4598, 0x000001ff, 0x1},
+ {0x4534, 0xffffffff, 0x0},
+ {0x4538, 0xffffffff, 0x4b},
+ {0x45ac, 0x38000000, 0x7},
+ {0x4588, 0xf0000000, 0x0},
+ {0x459c, 0x7e000000, 0x0},
+ {0x45b8, 0x00040000, 0x0},
+ {0x45b8, 0x00020000, 0x0},
+ {0x4590, 0xffc00000, 0x0},
+ {0x45b8, 0x00004000, 0x0},
+ {0x4578, 0xff000000, 0x0},
+ {0x45b8, 0x00000400, 0x0},
+ {0x45b8, 0x00000800, 0x0},
+ {0x45b8, 0x00001000, 0x0},
+ {0x45b8, 0x00002000, 0x0},
+ {0x45b4, 0x00018000, 0x0},
+ {0x45ac, 0x07800000, 0x0},
+ {0x45b4, 0x00000600, 0x2},
+ {0x459c, 0x0001fe00, 0x80},
+ {0x45ac, 0x00078000, 0x3},
+ {0x459c, 0x01fe0000, 0x1},
+};
+
+static const struct rtw89_reg3_def rtw8852b_btc_preagc_en_defs[] = {
+ {0x46D0, GENMASK(1, 0), 0x3},
+ {0x4790, GENMASK(1, 0), 0x3},
+ {0x4AD4, GENMASK(31, 0), 0xf},
+ {0x4AE0, GENMASK(31, 0), 0xf},
+ {0x4688, GENMASK(31, 24), 0x80},
+ {0x476C, GENMASK(31, 24), 0x80},
+ {0x4694, GENMASK(7, 0), 0x80},
+ {0x4694, GENMASK(15, 8), 0x80},
+ {0x4778, GENMASK(7, 0), 0x80},
+ {0x4778, GENMASK(15, 8), 0x80},
+ {0x4AE4, GENMASK(23, 0), 0x780D1E},
+ {0x4AEC, GENMASK(23, 0), 0x780D1E},
+ {0x469C, GENMASK(31, 26), 0x34},
+ {0x49F0, GENMASK(31, 26), 0x34},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_en_defs);
+
+static const struct rtw89_reg3_def rtw8852b_btc_preagc_dis_defs[] = {
+ {0x46D0, GENMASK(1, 0), 0x0},
+ {0x4790, GENMASK(1, 0), 0x0},
+ {0x4AD4, GENMASK(31, 0), 0x60},
+ {0x4AE0, GENMASK(31, 0), 0x60},
+ {0x4688, GENMASK(31, 24), 0x1a},
+ {0x476C, GENMASK(31, 24), 0x1a},
+ {0x4694, GENMASK(7, 0), 0x2a},
+ {0x4694, GENMASK(15, 8), 0x2a},
+ {0x4778, GENMASK(7, 0), 0x2a},
+ {0x4778, GENMASK(15, 8), 0x2a},
+ {0x4AE4, GENMASK(23, 0), 0x79E99E},
+ {0x4AEC, GENMASK(23, 0), 0x79E99E},
+ {0x469C, GENMASK(31, 26), 0x26},
+ {0x49F0, GENMASK(31, 26), 0x26},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_dis_defs);
+
+static const u32 rtw8852b_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
+ R_AX_H2CREG_DATA3
+};
+
+static const u32 rtw8852b_c2h_regs[RTW89_C2HREG_MAX] = {
+ R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2,
+ R_AX_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8852b_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1,
+};
+
+static const struct rtw89_reg_def rtw8852b_dcfo_comp = {
+ R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
+};
+
+static const struct rtw89_imr_info rtw8852b_imr_info = {
+ .wdrls_imr_set = B_AX_WDRLS_IMR_SET,
+ .wsec_imr_reg = R_AX_SEC_DEBUG,
+ .wsec_imr_set = B_AX_IMR_ERROR,
+ .mpdu_tx_imr_set = 0,
+ .mpdu_rx_imr_set = 0,
+ .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET,
+ .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_ERR_IMR_ISR,
+ .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR,
+ .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET,
+ .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
+ .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR,
+ .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET,
+ .wde_imr_clr = B_AX_WDE_IMR_CLR,
+ .wde_imr_set = B_AX_WDE_IMR_SET,
+ .ple_imr_clr = B_AX_PLE_IMR_CLR,
+ .ple_imr_set = B_AX_PLE_IMR_SET,
+ .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR,
+ .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET,
+ .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR,
+ .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET,
+ .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR,
+ .other_disp_imr_set = 0,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR_ISR,
+ .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR,
+ .bbrpt_err_imr_set = 0,
+ .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR,
+ .ptcl_imr_clr = B_AX_PTCL_IMR_CLR_ALL,
+ .ptcl_imr_set = B_AX_PTCL_IMR_SET,
+ .cdma_imr_0_reg = R_AX_DLE_CTRL,
+ .cdma_imr_0_clr = B_AX_DLE_IMR_CLR,
+ .cdma_imr_0_set = B_AX_DLE_IMR_SET,
+ .cdma_imr_1_reg = 0,
+ .cdma_imr_1_clr = 0,
+ .cdma_imr_1_set = 0,
+ .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR,
+ .phy_intf_imr_clr = 0,
+ .phy_intf_imr_set = 0,
+ .rmac_imr_reg = R_AX_RMAC_ERR_ISR,
+ .rmac_imr_clr = B_AX_RMAC_IMR_CLR,
+ .rmac_imr_set = B_AX_RMAC_IMR_SET,
+ .tmac_imr_reg = R_AX_TMAC_ERR_IMR_ISR,
+ .tmac_imr_clr = B_AX_TMAC_IMR_CLR,
+ .tmac_imr_set = B_AX_TMAC_IMR_SET,
+};
+
+static const struct rtw89_rrsr_cfgs rtw8852b_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8852b_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD_V1,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852b_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852b_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852b_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda28),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda2c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda10),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda20),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xcef4),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x8424),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd200),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd220),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+ RTW89_DEF_FBTC_MREG(REG_BT_MODEM, 4, 0x178),
+};
+
+static const u8 rtw89_btc_8852b_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {70, 60, 50, 40};
+static const u8 rtw89_btc_8852b_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20};
+
+static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
+ B_AX_AFSM_PCIE_SUS_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
+ rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_AFE_LDO_CTRL, B_AX_AON_OFF_PC_EN);
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_AON_OFF_PC_EN,
+ 1000, 20000, false, rtwdev, R_AX_AFE_LDO_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0, B_AX_C1_L1_MASK, 0x1);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_OFF_CTRL0, B_AX_C3_L1_MASK, 0x3);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
+ XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI,
+ XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI,
+ XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI,
+ XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_SRAM_CTRL, 0, XTAL_SI_SRAM_DIS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+
+ if (!rtwdev->efuse.valid || rtwdev->efuse.power_k_valid)
+ goto func_en;
+
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VOL_L1_MASK, 0x9);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VREFPFM_L_MASK, 0xA);
+
+ if (rtwdev->hal.cv == CHIP_CBV) {
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write16_mask(rtwdev, R_AX_HCI_LDO_CTRL, B_AX_R_AX_VADJ_MASK, 0xA);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ }
+
+func_en:
+ rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN |
+ B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN |
+ B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN |
+ B_AX_DMACREG_GCKEN);
+ rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN,
+ B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+ B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN |
+ B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN | B_AX_TMAC_EN |
+ B_AX_RMAC_EN);
+
+ rtw89_write32_mask(rtwdev, R_AX_EECS_EESK_FUNC_SEL, B_AX_PINMUX_EESK_FUNC_SEL_MASK,
+ PINMUX_EESK_FUNC_SEL_BT_LOG);
+
+ return 0;
+}
+
+static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
+ XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC,
+ XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
+ rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x3);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ return 0;
+}
+
+static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse,
+ struct rtw8852b_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852b_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8852b_efuse *map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ struct rtw8852b_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
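+/* Decode one eFuse gain byte: the high and low nibbles are independent 4-bit
+ * signed values. A raw value of 0xff means "not programmed", so the return
+ * value tells the caller whether the byte carried real data.
+ */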
+static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low)
+{
+ if (high)
+ *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3);
+ if (low)
+ *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3);
+
+ return data != 0xff;
+}
+
+static void rtw8852b_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
+ struct rtw8852b_efuse *map)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ bool valid = false;
+
+ valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]);
+ valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_low,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_high,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
+
+ gain->offset_valid = valid;
+}
+
+static int rtw8852b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw8852b_efuse *map;
+
+ map = (struct rtw8852b_efuse *)log_map;
+
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8852b_efuse_parsing_tssi(rtwdev, map);
+ rtw8852b_efuse_parsing_gain_offset(rtwdev, map);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ rtw8852be_efuse_parsing(efuse, map);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
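+/* PWR_K_CHK_OFFSET is an absolute eFuse address; it is rebased against
+ * chip->phycap_addr to index into the PHY cap map. Reading PWR_K_CHK_VALUE
+ * there marks the power calibration as already programmed, which
+ * rtw8852b_pwr_on_func() uses to skip its SW voltage adjustment.
+ */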
+static void rtw8852b_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+#define PWR_K_CHK_OFFSET 0x5E9
+#define PWR_K_CHK_VALUE 0xAA
+ u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr;
+
+ if (phycap_map[offset] == PWR_K_CHK_VALUE)
+ rtwdev->efuse.power_k_valid = true;
+}
+
+static void rtw8852b_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ static const u32 tssi_trim_addr[RF_PATH_NUM_8852B] = {0x5D6, 0x5AB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = false;
+ u32 ofst;
+ u8 i, j;
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++) {
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr[i] - addr - j;
+ tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+ }
+
+ if (!pg) {
+ memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM] no PG, set all trim info to 0\n");
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++)
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+ i, j, tssi->tssi_trim[i][j],
+ tssi_trim_addr[i] - j);
+}
+
+static void rtw8852b_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 thm_trim_addr[RF_PATH_NUM_8852B] = {0x5DF, 0x5DC};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++) {
+ info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+ i, info->thermal_trim[i]);
+
+ if (info->thermal_trim[i] != 0xff)
+ info->pg_thermal_trim = true;
+ }
+}
+
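+/* Apply the per-path thermal trim read from eFuse. __thm_setting() repacks
+ * the raw trim byte for the RR_TM2_OFF field: bit 0 is OR'ed into bit 3 and
+ * bits [4:1] are shifted down to [3:0].
+ */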
+static void rtw8852b_thermal_trim(struct rtw89_dev *rtwdev)
+{
+#define __thm_setting(raw) \
+({ \
+ u8 __v = (raw); \
+ ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
+})
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 i, val;
+
+ if (!info->pg_thermal_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++) {
+ val = __thm_setting(info->thermal_trim[i]);
+ rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+ i, val);
+ }
+#undef __thm_setting
+}
+
+static void rtw8852b_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8852B] = {0x5DE, 0x5DB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+
+ if (info->pa_bias_trim[i] != 0xff)
+ info->pg_pa_bias_trim = true;
+ }
+}
+
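+/* The PA bias trim byte holds two nibbles: bits [3:0] for the 2 GHz PA
+ * (RR_BIASA_TXG) and bits [7:4] for the 5 GHz PA (RR_BIASA_TXA).
+ */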
+static void rtw8852b_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+ }
+}
+
+static void rtw8852b_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = {
+ {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8},
+ {0x590, 0x58F, 0, 0x58E, 0x58D},
+ };
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ u32 phycap_addr = rtwdev->chip->phycap_addr;
+ bool valid = false;
+ int path, i;
+ u8 data;
+
+ for (path = 0; path < 2; path++)
+ for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) {
+ if (comp_addrs[path][i] == 0)
+ continue;
+
+ data = phycap_map[comp_addrs[path][i] - phycap_addr];
+ valid |= _decode_efuse_gain(data, NULL,
+ &gain->comp[path][i]);
+ }
+
+ gain->comp_valid = valid;
+}
+
+static int rtw8852b_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8852b_phycap_parsing_power_cal(rtwdev, phycap_map);
+ rtw8852b_phycap_parsing_tssi(rtwdev, phycap_map);
+ rtw8852b_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8852b_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+ rtw8852b_phycap_parsing_gain_comp(rtwdev, phycap_map);
+
+ return 0;
+}
+
+static void rtw8852b_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852b_thermal_trim(rtwdev);
+ rtw8852b_pa_bias_trim(rtwdev);
+}
+
+static void rtw8852b_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
+ u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx);
+ u8 txsc20 = 0, txsc40 = 0;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_80:
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
+ rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0));
+ rtw89_write32(rtwdev, sub_carr, txsc20);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK);
+ rtw89_write32(rtwdev, sub_carr, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (chan->channel > 14) {
+ rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE);
+ rtw89_write8_set(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+ } else {
+ rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE);
+ rtw89_write8_clr(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+ }
+}
+
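+/* Sample clock offset (SCO) compensation thresholds for the Barker and CCK
+ * receivers, one entry per 2.4 GHz channel (index = primary channel - 1).
+ */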
+static const u32 rtw8852b_sco_barker_threshold[14] = {
+ 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6,
+ 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4
+};
+
+static const u32 rtw8852b_sco_cck_threshold[14] = {
+ 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724,
+ 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed
+};
+
+static void rtw8852b_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch)
+{
+ u8 ch_element = primary_ch - 1;
+
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH,
+ rtw8852b_sco_barker_threshold[ch_element]);
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH,
+ rtw8852b_sco_cck_threshold[ch_element]);
+}
+
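+/* Map the central channel to the SCO compensation code programmed into
+ * B_FC0_BW_INV; channels outside the known ranges fall back to 0.
+ */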
+static u8 rtw8852b_sco_mapping(u8 central_ch)
+{
+ if (central_ch == 1)
+ return 109;
+ else if (central_ch >= 2 && central_ch <= 6)
+ return 108;
+ else if (central_ch >= 7 && central_ch <= 10)
+ return 107;
+ else if (central_ch >= 11 && central_ch <= 14)
+ return 106;
+ else if (central_ch == 36 || central_ch == 38)
+ return 51;
+ else if (central_ch >= 40 && central_ch <= 58)
+ return 50;
+ else if (central_ch >= 60 && central_ch <= 64)
+ return 49;
+ else if (central_ch == 100 || central_ch == 102)
+ return 48;
+ else if (central_ch >= 104 && central_ch <= 126)
+ return 47;
+ else if (central_ch >= 128 && central_ch <= 151)
+ return 46;
+ else if (central_ch >= 153 && central_ch <= 177)
+ return 45;
+ else
+ return 0;
+}
+
+struct rtw8852b_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8852B];
+ u32 gain_a[BB_PATH_NUM_8852B];
+ u32 gain_mask;
+};
+
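+/* Gain-error table entries: per-path register addresses for the 2 GHz
+ * (gain_g) and 5 GHz (gain_a) bands, plus the byte mask selecting the
+ * LNA/TIA index within each 32-bit register.
+ */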
+static const struct rtw8852b_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x000000ff },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x0000ff00 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x000000ff },
+};
+
+static const struct rtw8852b_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0xff000000 },
+};
+
+static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_lna[i].gain_g[path];
+ else
+ reg = bb_gain_lna[i].gain_a[path];
+
+ mask = bb_gain_lna[i].gain_mask;
+ val = gain->lna_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_tia[i].gain_g[path];
+ else
+ reg = bb_gain_tia[i].gain_a[path];
+
+ mask = bb_gain_tia[i].gain_mask;
+ val = gain->tia_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+}
+
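+/* Program the eFuse gain compensation per path, then derive the RSSI and
+ * report-power offsets from the OFDM gain offset and the offset/RSSI bases
+ * captured in rtw8852b_bb_sethw() after the BB parameters were loaded.
+ */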
+static void rtw8852b_set_gain_offset(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD};
+ static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1,
+ R_PATH1_G_TIA1_LNA6_OP1DB_V1};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_ofdm_band;
+ s32 offset_a, offset_b;
+ s32 offset_ofdm, offset_cck;
+ s32 tmp;
+ u8 path;
+
+ if (!efuse_gain->comp_valid)
+ goto next;
+
+ for (path = RF_PATH_A; path < BB_PATH_NUM_8852B; path++) {
+ tmp = efuse_gain->comp[path][subband];
+ tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp);
+ }
+
+next:
+ if (!efuse_gain->offset_valid)
+ return;
+
+ gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband);
+
+ offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
+ offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
+
+ tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp);
+
+ tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp);
+
+ if (hal->antenna_rx == RF_B) {
+ offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
+ offset_cck = -efuse_gain->offset[RF_PATH_B][0];
+ } else {
+ offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
+ offset_cck = -efuse_gain->offset[RF_PATH_A][0];
+ }
+
+ tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0];
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
+
+ tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0];
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
+
+ if (subband == RTW89_CH_2G) {
+ tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1);
+ tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1);
+ rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST,
+ B_RX_RPL_OFST_CCK_MASK, tmp);
+ }
+}
+
+static
+void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 band = rtw89_subband_to_bb_gain_band(subband);
+ u32 val;
+
+ val = FIELD_PREP(B_P0_RPL1_20_MASK, (gain->rpl_ofst_20[band][RF_PATH_A] +
+ gain->rpl_ofst_20[band][RF_PATH_B]) / 2) |
+ FIELD_PREP(B_P0_RPL1_40_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][0] +
+ gain->rpl_ofst_40[band][RF_PATH_B][0]) / 2) |
+ FIELD_PREP(B_P0_RPL1_41_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][1] +
+ gain->rpl_ofst_40[band][RF_PATH_B][1]) / 2);
+ val >>= B_P0_RPL1_SHIFT;
+ rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val);
+
+ val = FIELD_PREP(B_P0_RTL2_42_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][2] +
+ gain->rpl_ofst_40[band][RF_PATH_B][2]) / 2) |
+ FIELD_PREP(B_P0_RTL2_80_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][0] +
+ gain->rpl_ofst_80[band][RF_PATH_B][0]) / 2) |
+ FIELD_PREP(B_P0_RTL2_81_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][1] +
+ gain->rpl_ofst_80[band][RF_PATH_B][1]) / 2) |
+ FIELD_PREP(B_P0_RTL2_8A_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][10] +
+ gain->rpl_ofst_80[band][RF_PATH_B][10]) / 2);
+ rtw89_phy_write32(rtwdev, R_P0_RPL2, val);
+ rtw89_phy_write32(rtwdev, R_P1_RPL2, val);
+
+ val = FIELD_PREP(B_P0_RTL3_82_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][2] +
+ gain->rpl_ofst_80[band][RF_PATH_B][2]) / 2) |
+ FIELD_PREP(B_P0_RTL3_83_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][3] +
+ gain->rpl_ofst_80[band][RF_PATH_B][3]) / 2) |
+ FIELD_PREP(B_P0_RTL3_84_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][4] +
+ gain->rpl_ofst_80[band][RF_PATH_B][4]) / 2) |
+ FIELD_PREP(B_P0_RTL3_89_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][9] +
+ gain->rpl_ofst_80[band][RF_PATH_B][9]) / 2);
+ rtw89_phy_write32(rtwdev, R_P0_RPL3, val);
+ rtw89_phy_write32(rtwdev, R_P1_RPL3, val);
+}
+
+static void rtw8852b_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 central_ch = chan->channel;
+ u8 subband = chan->subband_type;
+ u8 sco_comp;
+ bool is_2g = central_ch <= 14;
+
+ /* Path A */
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx);
+
+ /* Path B */
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx);
+
+ /* SCO compensate FC setting */
+ sco_comp = rtw8852b_sco_mapping(central_ch);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx);
+
+ if (chan->band_type == RTW89_BAND_6G)
+ return;
+
+ /* CCK parameters */
+ if (central_ch == 14) {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5);
+ }
+
+ rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_A);
+ rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_B);
+ rtw8852b_set_gain_offset(rtwdev, subband, phy_idx);
+ rtw8852b_set_rxsc_rpl_comp(rtwdev, subband);
+}
+
+static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ static const u32 adc_sel[2] = {0xC0EC, 0xC1EC};
+ static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to set ADC\n");
+ }
+}
+
+static void rtw8852b_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 rx_path_0;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
+ pri_ch, phy_idx);
+
+ /* Set RF mode at 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ /* CCK primary channel */
+ if (pri_ch == RTW89_SC_20_UPPER)
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0);
+
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
+ pri_ch, phy_idx);
+
+ /* Set RF mode at A */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
+ pri_ch);
+ }
+
+ rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A);
+ rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B);
+
+ rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0,
+ phy_idx);
+ if (rx_path_0 == 0x1)
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
+ else if (rx_path_0 == 0x2)
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
+}
+
+static void rtw8852b_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en)
+{
+ if (cck_en) {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
+ }
+}
+
+static void rtw8852b_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 pri_ch = chan->primary_channel;
+ bool mask_5m_low;
+ bool mask_5m_en;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_40:
+ /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */
+ mask_5m_en = true;
+ mask_5m_low = pri_ch == 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */
+ mask_5m_en = pri_ch == 3 || pri_ch == 4;
+ mask_5m_low = pri_ch == 4;
+ break;
+ default:
+ mask_5m_en = false;
+ break;
+ }
+
+ if (!mask_5m_en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
+ B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx);
+ return;
+ }
+
+ if (mask_5m_low) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0);
+ }
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
+ B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx);
+}
+
+static void rtw8852b_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+}
+
+static void rtw8852b_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ enum rtw89_phy_idx phy_idx, bool en)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ }
+}
+
+static void rtw8852b_bb_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+ rtw8852b_bb_reset_all(rtwdev, phy_idx);
+ rtw89_phy_write32_clr(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
+ rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_clr(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+}
+
+static void rtw8852b_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 addr;
+
+ for (addr = R_AX_PWR_MACID_LMT_TABLE0;
+ addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+}
+
+static void rtw8852b_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP);
+
+ rtw8852b_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
+
+ /* read these registers after loading BB parameters */
+ gain->offset_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK);
+ gain->rssi_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK);
+}
+
+static void rtw8852b_bb_set_pop(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN);
+}
+
+static void rtw8852b_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ bool cck_en = chan->channel <= 14;
+ u8 pri_ch_idx = chan->pri_ch_idx;
+
+ if (cck_en)
+ rtw8852b_ctrl_sco_cck(rtwdev, chan->primary_channel);
+
+ rtw8852b_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8852b_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
+ rtw8852b_ctrl_cck_en(rtwdev, cck_en);
+ if (chan->band_type == RTW89_BAND_5G) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
+ }
+ rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0,
+ chan->primary_channel);
+ rtw8852b_5m_mask(rtwdev, chan, phy_idx);
+ rtw8852b_bb_set_pop(rtwdev);
+ rtw8852b_bb_reset_all(rtwdev, phy_idx);
+}
+
+static void rtw8852b_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852b_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852b_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852b_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
+static void rtw8852b_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk[2] = {R_P0_TSSI_TRK, R_P1_TSSI_TRK};
+ static const u32 ctrl_bbrst[2] = {R_P0_TXPW_RSTB, R_P1_TXPW_RSTB};
+
+ if (en) {
+ rtw89_phy_write32_mask(rtwdev, ctrl_bbrst[path], B_P0_TXPW_RSTB_MANON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, ctrl_bbrst[path], B_P0_TXPW_RSTB_MANON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x1);
+ }
+}
+
+static void rtw8852b_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en,
+ u8 phy_idx)
+{
+ if (!rtwdev->dbcc_en) {
+ rtw8852b_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852b_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8852b_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8852b_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
+static void rtw8852b_adc_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0xf);
+}
+
+static void rtw8852b_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw8852b_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852b_adc_en(rtwdev, false);
+ fsleep(40);
+ rtw8852b_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw8852b_adc_en(rtwdev, true);
+ rtw8852b_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852b_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
+ rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ }
+}
+
+static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev)
+{
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+
+ rtw8852b_dpk_init(rtwdev);
+ rtw8852b_rck(rtwdev);
+ rtw8852b_dack(rtwdev);
+ rtw8852b_rx_dck(rtwdev, RTW89_PHY_0);
+}
+
+static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+
+ rtw8852b_rx_dck(rtwdev, phy_idx);
+ rtw8852b_iqk(rtwdev, phy_idx);
+ rtw8852b_tssi(rtwdev, phy_idx, true);
+ rtw8852b_dpk(rtwdev, phy_idx);
+}
+
+static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852b_tssi_scan(rtwdev, phy_idx);
+}
+
+static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+ rtw8852b_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+}
+
+static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
+{
+ rtw8852b_dpk_track(rtwdev);
+}
+
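+/* Pack the reference power into the DPD reference word: the power codeword
+ * keeps the lower 3 bits as the BB fine step and bits [8:3] as the RF
+ * codeword (clamped to 15..63), while the TSSI offset codeword is expressed
+ * relative to the 16 dBm anchor (0x12c).
+ */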
+static u32 rtw8852b_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, s16 ref)
+{
+ const u16 tssi_16dbm_cw = 0x12c;
+ const u8 base_cw_0db = 0x27;
+ const s8 ofst_int = 0;
+ s16 pwr_s10_3;
+ s16 rf_pwr_cw;
+ u16 bb_pwr_cw;
+ u32 pwr_cw;
+ u32 tssi_ofst_cw;
+
+ pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
+ rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
+ rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
+ pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
+
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
+ tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
+
+ return FIELD_PREP(B_DPD_TSSI_CW, tssi_ofst_cw) |
+ FIELD_PREP(B_DPD_PWR_CW, pwr_cw) |
+ FIELD_PREP(B_DPD_REF, ref);
+}
+
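+/* Write the OFDM and CCK TX power reference words to both paths' DPD
+ * reference registers (base 0x5800/0x7800, OFDM at +0x4, CCK at +0x8) after
+ * zeroing the B_AX_PWR_REF field in R_AX_PWR_RATE_CTRL.
+ */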
+static void rtw8852b_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 addr[RF_PATH_NUM_8852B] = {0x5800, 0x7800};
+ const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
+ const u8 ofst_ofdm = 0x4;
+ const u8 ofst_cck = 0x8;
+ const s16 ref_ofdm = 0;
+ const s16 ref_cck = 0;
+ u32 val;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
+ B_AX_PWR_REF, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
+ phy_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
+ val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
+ phy_idx);
+}
+
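+/* Select the CCK TX DFIR coefficient set for the 2 GHz band: tx_shape_idx 0
+ * picks the flat response, anything else the sharp one, and channel 14
+ * always uses its dedicated sharp_14 set.
+ */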
+static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ u8 tx_shape_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2))
+#define __DFIR_CFG_MASK 0xffffffff
+#define __DFIR_CFG_NR 8
+#define __DECL_DFIR_PARAM(_name, _val...) \
+ static const u32 param_ ## _name[] = {_val}; \
+ static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR)
+
+ __DECL_DFIR_PARAM(flat,
+ 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053,
+ 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5);
+ __DECL_DFIR_PARAM(sharp,
+ 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090,
+ 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5);
+ __DECL_DFIR_PARAM(sharp_14,
+ 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
+ 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ const u32 *param;
+ u32 addr;
+ int i;
+
+ if (ch > 14) {
+ rtw89_warn(rtwdev,
+ "set tx shape dfir by unknown ch: %d on 2G\n", ch);
+ return;
+ }
+
+ if (ch == 14)
+ param = param_sharp_14;
+ else
+ param = tx_shape_idx == 0 ? param_flat : param_sharp;
+
+ for (i = 0; i < __DFIR_CFG_NR; i++) {
+ addr = __DFIR_CFG_ADDR(i);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]);
+ rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i],
+ phy_idx);
+ }
+
+#undef __DECL_DFIR_PARAM
+#undef __DFIR_CFG_NR
+#undef __DFIR_CFG_MASK
+#undef __DFIR_CFG_ADDR
+}
+
+static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 band = chan->band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ u8 tx_shape_cck = rtw89_8852b_tx_shape[band][RTW89_RS_CCK][regd];
+ u8 tx_shape_ofdm = rtw89_8852b_tx_shape[band][RTW89_RS_OFDM][regd];
+
+ if (band == RTW89_BAND_2G)
+ rtw8852b_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG,
+ tx_shape_ofdm);
+}
+
+static void rtw8852b_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852b_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void rtw8852b_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852b_set_txpwr_ref(rtwdev, phy_idx);
+}
+
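+/* Program the UL trigger-based TX power offset: pw_ofst must lie in
+ * [-16, 15]; the 1T entry takes the value directly and the 2T entry is set
+ * 3 steps lower, floored at -16.
+ */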
+static
+void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ u32 reg;
+
+ if (pw_ofst < -16 || pw_ofst > 15) {
+ rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
+ return;
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
+
+ pw_ofst = max_t(s8, pw_ofst - 3, -16);
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
+}
+
+static int
+rtw8852b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ int ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
+ if (ret)
+ return ret;
+
+ rtw8852b_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
+ RTW89_MAC_1 : RTW89_MAC_0);
+
+ return 0;
+}
+
+void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_reg3_def *def = rtw8852b_pmac_ht20_mcs7_tbl;
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(rtw8852b_pmac_ht20_mcs7_tbl); i++, def++)
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void rtw8852b_stop_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852b_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx");
+ if (tx_info->mode == CONT_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx);
+ else if (tx_info->mode == PKTS_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx);
+}
+
+static void rtw8852b_start_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852b_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ enum rtw8852b_pmac_mode mode = tx_info->mode;
+ u32 pkt_cnt = tx_info->tx_cnt;
+ u16 period = tx_info->period;
+
+ if (mode == CONT_TX && !tx_info->is_cck) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start");
+ } else if (mode == PKTS_TX) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD,
+ B_PMAC_TX_PRD_MSK, period, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK,
+ pkt_cnt, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start");
+ }
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx);
+}
+
+void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852b_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ if (!tx_info->en_pmac_tx) {
+ rtw8852b_stop_pmac_tx(rtwdev, tx_info, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable");
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx);
+ rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx);
+
+ rtw8852b_start_pmac_tx(rtwdev, tx_info, idx);
+}
+
+void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx)
+{
+ struct rtw8852b_bb_pmac_info tx_info = {0};
+
+ tx_info.en_pmac_tx = enable;
+ tx_info.is_cck = 0;
+ tx_info.mode = PKTS_TX;
+ tx_info.tx_cnt = tx_cnt;
+ tx_info.period = period;
+ tx_info.tx_time = tx_time;
+
+ rtw8852b_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+}
+
+void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm);
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx);
+}
+
+void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
+{
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path);
+
+ if (tx_path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_B) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path");
+ }
+}
+
+void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode)
+{
+ if (mode != 0)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch");
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx);
+}
+
+void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852b_bb_tssi_bak *bak)
+{
+ s32 tmp;
+
+ bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx);
+ bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx);
+ bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx);
+ bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx);
+ bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx);
+ bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx);
+ tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx);
+ bak->tx_pwr = sign_extend32(tmp, 8);
+}
+
+void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852b_bb_tssi_bak *bak)
+{
+ rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx);
+ if (bak->tx_path == RF_AB)
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx);
+ rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx);
+}
+
+static void rtw8852b_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en)
+{
+ rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? &rtw8852b_btc_preagc_en_defs_tbl :
+ &rtw8852b_btc_preagc_dis_defs_tbl);
+}
+
+static void rtw8852b_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
+{
+ if (btg) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x20);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
+ }
+}
+
+void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u32 rst_mask0;
+ u32 rst_mask1;
+
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else if (rx_path == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else if (rx_path == RF_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+
+ rtw8852b_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0);
+
+ if (chan->band_type == RTW89_BAND_2G &&
+ (rx_path == RF_B || rx_path == RF_AB))
+ rtw8852b_ctrl_btg(rtwdev, true);
+ else
+ rtw8852b_ctrl_btg(rtwdev, false);
+
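+ /* Assumed intent: pulse the per-path TX-power/TSSI reset field on the active path (write 1, then 3) */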
+ rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
+ rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
+ }
+}
+
+static void rtw8852b_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x333);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x111);
+ } else if (rx_path == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x111);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x333);
+ } else if (rx_path == RF_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x333);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x333);
+ }
+}
+
+static void rtw8852b_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
+
+ rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852b_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
+
+ if (rtwdev->hal.rx_nss == 1) {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0);
+}
+
+static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ if (rtwdev->is_tssi_mode[rf_path]) {
+ u32 addr = 0x1c10 + (rf_path << 13);
+
+ return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000);
+ }
+
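+ /* non-TSSI mode: restart the RF thermal meter (trigger 1 -> 0 -> 1) and allow ~200us for a stable reading */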
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
+}
+
+static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+
+ module->rfe_type = rtwdev->efuse.rfe_type;
+ module->cv = rtwdev->hal.cv;
+ module->bt_solo = 0;
+ module->switch_type = BTC_SWITCH_INTERNAL;
+
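+ /* odd rfe_type -> 2 antennas (shared with BT), even non-zero -> 3 antennas (dedicated BT antenna) */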
+ if (module->rfe_type > 0)
+ module->ant.num = module->rfe_type % 2 ? 2 : 3;
+ else
+ module->ant.num = 2;
+
+ module->ant.diversity = 0;
+ module->ant.isolation = 10;
+
+ if (module->ant.num == 3) {
+ module->ant.type = BTC_ANT_DEDICATED;
+ module->bt_pos = BTC_BT_ALONE;
+ } else {
+ module->ant.type = BTC_ANT_SHARED;
+ module->bt_pos = BTC_BT_BTG;
+ }
+}
+
+static
+void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
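+ /* program one TRX mask entry for the given group through the RF LUT write window */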
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_mac_ax_coex coex_params = {
+ .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
+ .direction = RTW89_MAC_AX_COEX_INNER,
+ };
+
+ /* PTA init */
+ rtw89_mac_coex_init(rtwdev, &coex_params);
+
+ /* set WL Tx response = Hi-Pri */
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+
+ /* set rf gnt debug off */
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
+
+ /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
+ if (module->ant.type == BTC_ANT_SHARED) {
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f);
+ } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff);
+ }
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* enable BT counter: set 0xda40 bits [16] and [2] */
+ rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static
+void rtw8852b_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ u32 bitmap;
+ u32 reg;
+
+ switch (map) {
+ case BTC_PRI_MASK_TX_RESP:
+ reg = R_BTC_BT_COEX_MSK_TABLE;
+ bitmap = B_BTC_PRI_MASK_TX_RESP_V1;
+ break;
+ case BTC_PRI_MASK_BEACON:
+ reg = R_AX_WL_PRI_MSK;
+ bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ;
+ break;
+ case BTC_PRI_MASK_RX_CCK:
+ reg = R_BTC_BT_COEX_MSK_TABLE;
+ bitmap = B_BTC_PRI_MASK_RXCCK_V1;
+ break;
+ default:
+ return;
+ }
+
+ if (state)
+ rtw89_write32_set(rtwdev, reg, bitmap);
+ else
+ rtw89_write32_clr(rtwdev, reg, bitmap);
+}
+
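+/* overlay view of the 32-bit coex TX-power request: one half carries the all-time
+ * control, the other the GNT_BT control; 0xffff in a half means "do not force".
+ */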
+union rtw8852b_btc_wl_txpwr_ctrl {
+ u32 txpwr_val;
+ struct {
+ union {
+ u16 ctrl_all_time;
+ struct {
+ s16 data:9;
+ u16 rsvd:6;
+ u16 flag:1;
+ } all_time;
+ };
+ union {
+ u16 ctrl_gnt_bt;
+ struct {
+ s16 data:9;
+ u16 rsvd:7;
+ } gnt_bt;
+ };
+ };
+} __packed;
+
+static void
+rtw8852b_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ union rtw8852b_btc_wl_txpwr_ctrl arg = { .txpwr_val = txpwr_val };
+ s32 val;
+
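+/* helper: write _val into the _msk field and set or clear the _en force bit based on _cond */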
+#define __write_ctrl(_reg, _msk, _val, _en, _cond) \
+do { \
+ u32 _wrt = FIELD_PREP(_msk, _val); \
+ BUILD_BUG_ON(!!(_msk & _en)); \
+ if (_cond) \
+ _wrt |= _en; \
+ else \
+ _wrt &= ~_en; \
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, _reg, \
+ _msk | _en, _wrt); \
+} while (0)
+
+ switch (arg.ctrl_all_time) {
+ case 0xffff:
+ val = 0;
+ break;
+ default:
+ val = arg.all_time.data;
+ break;
+ }
+
+ __write_ctrl(R_AX_PWR_RATE_CTRL, B_AX_FORCE_PWR_BY_RATE_VALUE_MASK,
+ val, B_AX_FORCE_PWR_BY_RATE_EN,
+ arg.ctrl_all_time != 0xffff);
+
+ switch (arg.ctrl_gnt_bt) {
+ case 0xffff:
+ val = 0;
+ break;
+ default:
+ val = arg.gnt_bt.data;
+ break;
+ }
+
+ __write_ctrl(R_AX_PWR_COEXT_CTRL, B_AX_TXAGC_BT_MASK, val,
+ B_AX_TXAGC_BT_EN, arg.ctrl_gnt_bt != 0xffff);
+
+#undef __write_ctrl
+}
+
+static
+s8 rtw8852b_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
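+ /* map BT RSSI (dBm, clamped to [-100, 0]) onto a 0..100 scale */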
+ return clamp_t(s8, val, -100, 0) + 100;
+}
+
+static
+void rtw8852b_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ /* Feature moved to firmware */
+}
+
+static void rtw8852b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31);
+
+ /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
+ if (state)
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x579);
+ else
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+}
+
+static void rtw8852b_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 chan = phy_ppdu->chan_idx;
+ u8 band;
+
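+ /* channel index 0 means the PPDU carries no channel info; leave status as-is */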
+ if (chan == 0)
+ return;
+
+ band = chan <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ status->freq = ieee80211_channel_to_frequency(chan, band);
+ status->band = band;
+}
+
+static void rtw8852b_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8852b_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
int ret;
@@ -75,13 +2381,150 @@ static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.enable_bb_rf = rtw8852b_mac_enable_bb_rf,
.disable_bb_rf = rtw8852b_mac_disable_bb_rf,
+ .bb_reset = rtw8852b_bb_reset,
+ .bb_sethw = rtw8852b_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v1,
+ .write_rf = rtw89_phy_write_rf_v1,
+ .set_channel = rtw8852b_set_channel,
+ .set_channel_help = rtw8852b_set_channel_help,
+ .read_efuse = rtw8852b_read_efuse,
+ .read_phycap = rtw8852b_read_phycap,
+ .fem_setup = NULL,
+ .rfk_init = rtw8852b_rfk_init,
+ .rfk_channel = rtw8852b_rfk_channel,
+ .rfk_band_changed = rtw8852b_rfk_band_changed,
+ .rfk_scan = rtw8852b_rfk_scan,
+ .rfk_track = rtw8852b_rfk_track,
+ .power_trim = rtw8852b_power_trim,
+ .set_txpwr = rtw8852b_set_txpwr,
+ .set_txpwr_ctrl = rtw8852b_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852b_init_txpwr_unit,
+ .get_thermal = rtw8852b_get_thermal,
+ .ctrl_btg = rtw8852b_ctrl_btg,
+ .query_ppdu = rtw8852b_query_ppdu,
+ .bb_ctrl_btc_preagc = rtw8852b_bb_ctrl_btc_preagc,
+ .cfg_txrx_path = rtw8852b_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = rtw8852b_set_txpwr_ul_tb_offset,
+ .pwr_on_func = rtw8852b_pwr_on_func,
+ .pwr_off_func = rtw8852b_pwr_off_func,
+ .fill_txdesc = rtw89_core_fill_txdesc,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx,
+ .h2c_dctl_sec_cam = NULL,
+
+ .btc_set_rfe = rtw8852b_btc_set_rfe,
+ .btc_init_cfg = rtw8852b_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852b_btc_set_wl_pri,
+ .btc_set_wl_txpwr_ctrl = rtw8852b_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8852b_btc_get_bt_rssi,
+ .btc_update_bt_cnt = rtw8852b_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852b_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852b_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy,
};
const struct rtw89_chip_info rtw8852b_chip_info = {
.chip_id = RTL8852B,
+ .ops = &rtw8852b_chip_ops,
+ .fw_name = "rtw89/rtw8852b_fw.bin",
.fifo_size = 196608,
.dle_scc_rsvd_size = 98304,
+ .max_amsdu_limit = 3500,
+ .dis_2g_40m_ul_ofdma = true,
+ .rsvd_ple_ofst = 0x2f800,
+ .hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
.dle_mem = rtw8852b_dle_mem_pcie,
+ .rf_base_addr = {0xe000, 0xf000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .bb_table = &rtw89_8852b_phy_bb_table,
+ .bb_gain_table = &rtw89_8852b_phy_bb_gain_table,
+ .rf_table = {&rtw89_8852b_phy_radioa_table,
+ &rtw89_8852b_phy_radiob_table,},
+ .nctl_table = &rtw89_8852b_phy_nctl_table,
+ .byr_table = &rtw89_8852b_byr_table,
+ .txpwr_lmt_2g = &rtw89_8852b_txpwr_lmt_2g,
+ .txpwr_lmt_5g = &rtw89_8852b_txpwr_lmt_5g,
+ .txpwr_lmt_ru_2g = &rtw89_8852b_txpwr_lmt_ru_2g,
+ .txpwr_lmt_ru_5g = &rtw89_8852b_txpwr_lmt_ru_5g,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = NULL,
+ .dig_regs = &rtw8852b_dig_regs,
+ .tssi_dbw_table = NULL,
+ .support_chanctx_num = 0,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ .support_bw160 = false,
+ .support_ul_tb_ctrl = true,
+ .hw_sec_hdr = false,
+ .rf_path_num = 2,
+ .tx_nss = 2,
+ .rx_nss = 2,
+ .acam_num = 128,
+ .bcam_num = 10,
+ .scam_num = 128,
+ .bacam_num = 2,
+ .bacam_dynamic_num = 4,
+ .bacam_v1 = false,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 2048,
+ .limit_efuse_size = 1280,
+ .dav_phy_efuse_size = 96,
+ .dav_log_efuse_size = 16,
+ .phycap_addr = 0x580,
+ .phycap_size = 128,
+ .para_ver = 0,
+ .wlcx_desired = 0x05050000,
+ .btcx_desired = 0x5,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+ .btc_fwinfo_buf = 1024,
+
+ .fcxbtcrpt_ver = 1,
+ .fcxtdma_ver = 1,
+ .fcxslots_ver = 1,
+ .fcxcysta_ver = 2,
+ .fcxstep_ver = 2,
+ .fcxnullsta_ver = 1,
+ .fcxmreg_ver = 1,
+ .fcxgpiodbg_ver = 1,
+ .fcxbtver_ver = 1,
+ .fcxbtscan_ver = 1,
+ .fcxbtafh_ver = 1,
+ .fcxbtdevinfo_ver = 1,
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8852b_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8852b_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852b_mon_reg),
+ .mon_reg = rtw89_btc_8852b_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852b_rf_ul),
+ .rf_para_ulink = rtw89_btc_8852b_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852b_rf_dl),
+ .rf_para_dlink = rtw89_btc_8852b_rf_dl,
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+ .low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_desc_size = sizeof(struct rtw89_txwd_body),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body),
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL,
+ .h2c_regs = rtw8852b_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL,
+ .c2h_regs = rtw8852b_c2h_regs,
+ .page_regs = &rtw8852b_page_regs,
+ .cfo_src_fd = true,
+ .dcfo_comp = &rtw8852b_dcfo_comp,
+ .dcfo_comp_sft = 3,
+ .imr_info = &rtw8852b_imr_info,
+ .rrsr_cfgs = &rtw8852b_rrsr_cfgs,
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.h b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
new file mode 100644
index 000000000000..4f9b3d476879
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852B_H__
+#define __RTW89_8852B_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852B 2
+#define BB_PATH_NUM_8852B 2
+
+enum rtw8852b_pmac_mode {
+ NONE_TEST,
+ PKTS_TX,
+ PKTS_RX,
+ CONT_TX
+};
+
+struct rtw8852b_u_efuse {
+ u8 rsvd[0x88];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852b_e_efuse {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852b_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852b_efuse {
+ u8 rsvd[0x210];
+ struct rtw8852b_tssi_offset path_a_tssi;
+ u8 rsvd1[10];
+ struct rtw8852b_tssi_offset path_b_tssi;
+ u8 rsvd2[94];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd3;
+ u8 iqk_lck;
+ u8 rsvd4[5];
+ u8 reg_setting:2;
+ u8 tx_diversity:1;
+ u8 rx_diversity:2;
+ u8 ac_mode:1;
+ u8 module_type:2;
+ u8 rsvd5;
+ u8 shared_ant:1;
+ u8 coex_type:3;
+ u8 ant_iso:1;
+ u8 radio_on_off:1;
+ u8 rsvd6:2;
+ u8 eeprom_version;
+ u8 customer_id;
+ u8 tx_bb_swing_2g;
+ u8 tx_bb_swing_5g;
+ u8 tx_cali_pwr_trk_mode;
+ u8 trx_path_selection;
+ u8 rfe_type;
+ u8 country_code[2];
+ u8 rsvd7[3];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd8[2];
+ u8 rx_gain_2g_ofdm;
+ u8 rsvd9;
+ u8 rx_gain_2g_cck;
+ u8 rsvd10;
+ u8 rx_gain_5g_low;
+ u8 rsvd11;
+ u8 rx_gain_5g_mid;
+ u8 rsvd12;
+ u8 rx_gain_5g_high;
+ u8 rsvd13[35];
+ u8 path_a_cck_pwr_idx[6];
+ u8 path_a_bw40_1tx_pwr_idx[5];
+ u8 path_a_ofdm_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_2tx_pwr_idx_diff:4;
+ u8 path_a_bw40_2tx_pwr_idx_diff:4;
+ u8 path_a_cck_2tx_pwr_idx_diff:4;
+ u8 path_a_ofdm_2tx_pwr_idx_diff:4;
+ u8 rsvd14[0xf2];
+ union {
+ struct rtw8852b_u_efuse u;
+ struct rtw8852b_e_efuse e;
+ };
+} __packed;
+
+struct rtw8852b_bb_pmac_info {
+ u8 en_pmac_tx:1;
+ u8 is_cck:1;
+ u8 mode:3;
+ u8 rsvd:3;
+ u16 tx_cnt;
+ u16 period;
+ u16 tx_time;
+ u8 duty_cycle;
+};
+
+struct rtw8852b_bb_tssi_bak {
+ u8 tx_path;
+ u8 rx_path;
+ u32 p0_rfmode;
+ u32 p0_rfmode_ftm;
+ u32 p1_rfmode;
+ u32 p1_rfmode_ftm;
+ s16 tx_pwr; /* S9 */
+};
+
+extern const struct rtw89_chip_info rtw8852b_chip_info;
+
+void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
+void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852b_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx);
+void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx);
+void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx);
+void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
+void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path);
+void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode);
+void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852b_bb_tssi_bak *bak);
+void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852b_bb_tssi_bak *bak);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
new file mode 100644
index 000000000000..722ae34b09c1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -0,0 +1,4174 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852b.h"
+#include "rtw8852b_rfk.h"
+#include "rtw8852b_rfk_table.h"
+#include "rtw8852b_table.h"
+
+#define RTW8852B_RXDCK_VER 0x1
+#define RTW8852B_IQK_VER 0x2a
+#define RTW8852B_IQK_SS 2
+#define RTW8852B_RXK_GROUP_NR 4
+#define RTW8852B_TSSI_PATH_NR 2
+#define RTW8852B_RF_REL_VERSION 34
+#define RTW8852B_DPK_VER 0x0d
+#define RTW8852B_DPK_RF_PATH 2
+#define RTW8852B_DPK_KIP_REG_NUM 2
+
+#define _TSSI_DE_MASK GENMASK(21, 12)
+#define ADDC_T_AVG 100
+#define DPK_TXAGC_LOWER 0x2e
+#define DPK_TXAGC_UPPER 0x3f
+#define DPK_TXAGC_INVAL 0xff
+#define RFREG_MASKRXBB 0x003e0
+#define RFREG_MASKMODE 0xf0000
+
+enum rtw8852b_dpk_id {
+ LBK_RXIQK = 0x06,
+ SYNC = 0x10,
+ MDPK_IDL = 0x11,
+ MDPK_MPA = 0x12,
+ GAIN_LOSS = 0x13,
+ GAIN_CAL = 0x14,
+ DPK_RXAGC = 0x15,
+ KIP_PRESET = 0x16,
+ KIP_RESTORE = 0x17,
+ DPK_TXAGC = 0x19,
+ D_KIP_PRESET = 0x28,
+ D_TXAGC = 0x29,
+ D_RXAGC = 0x2a,
+ D_SYNC = 0x2b,
+ D_GAIN_LOSS = 0x2c,
+ D_MDPK_IDL = 0x2d,
+ D_GAIN_NORM = 0x2f,
+ D_KIP_THERMAL = 0x30,
+ D_KIP_RESTORE = 0x31
+};
+
+enum dpk_agc_step {
+ DPK_AGC_STEP_SYNC_DGAIN,
+ DPK_AGC_STEP_GAIN_ADJ,
+ DPK_AGC_STEP_GAIN_LOSS_IDX,
+ DPK_AGC_STEP_GL_GT_CRITERION,
+ DPK_AGC_STEP_GL_LT_CRITERION,
+ DPK_AGC_STEP_SET_TX_GAIN,
+};
+
+enum rtw8852b_iqk_type {
+ ID_TXAGC = 0x0,
+ ID_FLOK_COARSE = 0x1,
+ ID_FLOK_FINE = 0x2,
+ ID_TXK = 0x3,
+ ID_RXAGC = 0x4,
+ ID_RXK = 0x5,
+ ID_NBTXK = 0x6,
+ ID_NBRXK = 0x7,
+ ID_FLOK_VBUFFER = 0x8,
+ ID_A_FLOK_COARSE = 0x9,
+ ID_G_FLOK_COARSE = 0xa,
+ ID_A_FLOK_FINE = 0xb,
+ ID_G_FLOK_FINE = 0xc,
+ ID_IQK_RESTORE = 0x10,
+};
+
+static const u32 _tssi_trigger[RTW8852B_TSSI_PATH_NR] = {0x5820, 0x7820};
+static const u32 _tssi_cw_rpt_addr[RTW8852B_TSSI_PATH_NR] = {0x1c18, 0x3c18};
+static const u32 _tssi_cw_default_addr[RTW8852B_TSSI_PATH_NR][4] = {
+ {0x5634, 0x5630, 0x5630, 0x5630},
+ {0x7634, 0x7630, 0x7630, 0x7630} };
+static const u32 _tssi_cw_default_mask[4] = {
+ 0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
+static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852B] = {0x5858, 0x7858};
+static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852B] = {0x5860, 0x7860};
+static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852B] = {0x5838, 0x7838};
+static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852B] = {0x5840, 0x7840};
+static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852B] = {0x5848, 0x7848};
+static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852B] = {0x5850, 0x7850};
+static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852B] = {0x5828, 0x7828};
+static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852B] = {0x5830, 0x7830};
+static const u32 _a_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x190, 0x198, 0x350, 0x352};
+static const u32 _a_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x0f, 0x0f, 0x3f, 0x7f};
+static const u32 _a_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x1, 0x0, 0x0};
+static const u32 _g_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x212, 0x21c, 0x350, 0x360};
+static const u32 _g_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x00, 0x00, 0x28, 0x5f};
+static const u32 _g_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x2, 0x1};
+static const u32 _a_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
+static const u32 _a_track_range[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x6, 0x6};
+static const u32 _a_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
+static const u32 _a_itqt[RTW8852B_RXK_GROUP_NR] = {0x12, 0x12, 0x12, 0x1b};
+static const u32 _g_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
+static const u32 _g_track_range[RTW8852B_RXK_GROUP_NR] = {0x4, 0x4, 0x6, 0x6};
+static const u32 _g_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
+static const u32 _g_itqt[RTW8852B_RXK_GROUP_NR] = {0x09, 0x12, 0x1b, 0x24};
+
+static const u32 rtw8852b_backup_bb_regs[] = {0x2344, 0x5800, 0x7800};
+static const u32 rtw8852b_backup_rf_regs[] = {
+ 0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x1e, 0x0, 0x2, 0x5, 0x10005
+};
+
+#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852b_backup_bb_regs)
+#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852b_backup_rf_regs)
+
+static const struct rtw89_reg3_def rtw8852b_set_nondbcc_path01[] = {
+ {0x20fc, 0xffff0000, 0x0303},
+ {0x5864, 0x18000000, 0x3},
+ {0x7864, 0x18000000, 0x3},
+ {0x12b8, 0x40000000, 0x1},
+ {0x32b8, 0x40000000, 0x1},
+ {0x030c, 0xff000000, 0x13},
+ {0x032c, 0xffff0000, 0x0041},
+ {0x12b8, 0x10000000, 0x1},
+ {0x58c8, 0x01000000, 0x1},
+ {0x78c8, 0x01000000, 0x1},
+ {0x5864, 0xc0000000, 0x3},
+ {0x7864, 0xc0000000, 0x3},
+ {0x2008, 0x01ffffff, 0x1ffffff},
+ {0x0c1c, 0x00000004, 0x1},
+ {0x0700, 0x08000000, 0x1},
+ {0x0c70, 0x000003ff, 0x3ff},
+ {0x0c60, 0x00000003, 0x3},
+ {0x0c6c, 0x00000001, 0x1},
+ {0x58ac, 0x08000000, 0x1},
+ {0x78ac, 0x08000000, 0x1},
+ {0x0c3c, 0x00000200, 0x1},
+ {0x2344, 0x80000000, 0x1},
+ {0x4490, 0x80000000, 0x1},
+ {0x12a0, 0x00007000, 0x7},
+ {0x12a0, 0x00008000, 0x1},
+ {0x12a0, 0x00070000, 0x3},
+ {0x12a0, 0x00080000, 0x1},
+ {0x32a0, 0x00070000, 0x3},
+ {0x32a0, 0x00080000, 0x1},
+ {0x0700, 0x01000000, 0x1},
+ {0x0700, 0x06000000, 0x2},
+ {0x20fc, 0xffff0000, 0x3333},
+};
+
+static const struct rtw89_reg3_def rtw8852b_restore_nondbcc_path01[] = {
+ {0x20fc, 0xffff0000, 0x0303},
+ {0x12b8, 0x40000000, 0x0},
+ {0x32b8, 0x40000000, 0x0},
+ {0x5864, 0xc0000000, 0x0},
+ {0x7864, 0xc0000000, 0x0},
+ {0x2008, 0x01ffffff, 0x0000000},
+ {0x0c1c, 0x00000004, 0x0},
+ {0x0700, 0x08000000, 0x0},
+ {0x0c70, 0x0000001f, 0x03},
+ {0x0c70, 0x000003e0, 0x03},
+ {0x12a0, 0x000ff000, 0x00},
+ {0x32a0, 0x000ff000, 0x00},
+ {0x0700, 0x07000000, 0x0},
+ {0x20fc, 0xffff0000, 0x0000},
+ {0x58c8, 0x01000000, 0x0},
+ {0x78c8, 0x01000000, 0x0},
+ {0x0c3c, 0x00000200, 0x0},
+ {0x2344, 0x80000000, 0x0},
+};
+
+static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ backup_bb_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]backup bb reg : %x, value =%x\n",
+ rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
+ u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ backup_rf_reg_val[i] =
+ rtw89_read_rf(rtwdev, rf_path,
+ rtw8852b_backup_rf_regs[i], RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
+ rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
+ const u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
+ MASKDWORD, backup_bb_reg_val[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore bb reg : %x, value =%x\n",
+ rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
+ const u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ rtw89_write_rf(rtwdev, rf_path, rtw8852b_backup_rf_regs[i],
+ RFREG_MASK, backup_rf_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
+ rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+}
+
+static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+}
+
+static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
+{
+ bool fail = true;
+ u32 val;
+ int ret;
+
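+ /* poll for the NCTL one-shot done pattern (0x55), then read back the calibration fail flag */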
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");
+
+ udelay(200);
+
+ if (!ret)
+ fail = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
+ val = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8008 = 0x%x\n", path, val);
+
+ return fail;
+}
+
+static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
+ rtwdev->dbcc_en, phy_idx);
+
+ if (!rtwdev->dbcc_en) {
+ val = RF_AB;
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ val = RF_A;
+ else
+ val = RF_B;
+ }
+ return val;
+}
+
+static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+ mdelay(1);
+}
+
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, dck_tune;
+ u32 rf_reg5;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
+ RTW8852B_RXDCK_VER, rtwdev->hal.cv);
+
+ for (path = 0; path < RF_PATH_NUM_8852B; path++) {
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+ dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev,
+ R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x1);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ _set_rx_dck(rtwdev, phy, path);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev,
+ R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x0);
+ }
+}
+
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u32 rf_reg5;
+ u32 rck_val;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ /* RCK trigger */
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
+ false, rtwdev, path, RR_RCKS, BIT(3));
+
+ rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
+ rck_val, ret);
+
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
+}
+
+static void _afe_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32(rtwdev, R_AX_PHYREG_SET, 0xf);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_afe_init_defs_tbl);
+}
+
+static void _drck(struct rtw89_dev *rtwdev)
+{
+ u32 rck_d;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, R_DRCK_RS, B_DRCK_RS_DONE);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
+ rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RS, B_DRCK_RS_LPS);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_SEL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_CV, rck_d);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0cc = 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_DRCK_V1, MASKDWORD));
+}
+
+static void _addck_backup(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
+ dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
+ dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
+ dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
+ dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
+}
+
+static void _addck_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
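+ /* reload the backed-up ADDCK results: the first value written directly, the second split into
+ * its upper bits and low 6 bits, then manual mode forced
+ */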
+ /* S0 */
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL, dack->addck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_VAL, dack->addck_d[0][1] >> 6);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL2, dack->addck_d[0][1] & 0x3f);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x3);
+
+ /* S1 */
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL, dack->addck_d[1][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK0_VAL, dack->addck_d[1][1] >> 6);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL2, dack->addck_d[1][1] & 0x3f);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x3);
+}
+
+static void _dack_backup_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
+ dack->msbk_d[0][0][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
+ dack->msbk_d[0][1][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
+ }
+
+ dack->biask_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
+ dack->biask_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);
+
+ dack->dadck_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
+ dack->dadck_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
+}
+
+static void _dack_backup_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
+ dack->msbk_d[1][0][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
+ rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
+ dack->msbk_d[1][1][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
+ }
+
+ dack->biask_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
+ dack->biask_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);
+
+ dack->dadck_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
+ dack->dadck_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
+}
+
+static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ s32 dc_re = 0, dc_im = 0;
+ u32 tmp;
+ u32 i;
+
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_check_addc_defs_a_tbl,
+ &rtw8852b_check_addc_defs_b_tbl);
+
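+ /* average ADDC_T_AVG samples of the I/Q DC estimate exposed on the debug read port */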
+ for (i = 0; i < ADDC_T_AVG; i++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
+ dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
+ dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
+ }
+
+ dc_re /= ADDC_T_AVG;
+ dc_im /= ADDC_T_AVG;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
+}
+
+static void _addck(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ /* S0 */
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x30, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, R_ADDCKR0, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+
+ /* S1 */
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, R_ADDCKR1, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
+ dack->addck_timeout[1] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_check_dadc_en_defs_a_tbl,
+ &rtw8852b_check_dadc_en_defs_b_tbl);
+
+ _check_addc(rtwdev, path);
+
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_check_dadc_dis_defs_a_tbl,
+ &rtw8852b_check_dadc_dis_defs_b_tbl);
+}
+
+static bool _dack_s0_check_done(struct rtw89_dev *rtwdev, bool part1)
+{
+ if (part1) {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0)
+ return false;
+ } else {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_1_defs_tbl);
+
+ ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
+ false, rtwdev, true);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
+ dack->msbk_timeout[0] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_2_defs_tbl);
+
+ ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
+ false, rtwdev, false);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
+ dack->dadck_timeout[0] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_3_defs_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
+
+ _dack_backup_s0(rtwdev);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+}
+
+static bool _dack_s1_check_done(struct rtw89_dev *rtwdev, bool part1)
+{
+ if (part1) {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 &&
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0)
+ return false;
+ } else {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK_S1P2_OK) == 0 &&
+ rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK_S1P3_OK) == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void _dack_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_1_defs_tbl);
+
+ ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
+ false, rtwdev, true);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
+ dack->msbk_timeout[1] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_2_defs_tbl);
+
+ ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
+ false, rtwdev, false);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
+ dack->dadck_timeout[1] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_3_defs_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
+
+ _check_dadc(rtwdev, RF_PATH_B);
+ _dack_backup_s1(rtwdev);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+ _dack_s0(rtwdev);
+ _dack_s1(rtwdev);
+}
+
+static void _dack_dump(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+ u8 t;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[0][0], dack->addck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[1][0], dack->addck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[1][0], dack->dadck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[0][0], dack->biask_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[1][0], dack->biask_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
+ for (i = 0; i < 0x10; i++) {
+ t = dack->msbk_d[0][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+}
+
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 rf0_0, rf1_0;
+
+ dack->dack_done = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x1\n");
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+
+ rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
+ rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
+ _afe_init(rtwdev);
+ _drck(rtwdev);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
+ _addck(rtwdev);
+ _addck_backup(rtwdev);
+ _addck_reload(rtwdev);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
+ _dack(rtwdev);
+ _dack_dump(rtwdev);
+ dack->dack_done = true;
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
+ dack->dack_cnt++;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
+}
+
+static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 tmp;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
+ tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
+ tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path, u8 ktype)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 iqk_cmd;
+ bool fail;
+
+ switch (ktype) {
+ case ID_FLOK_COARSE:
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_FLOK_FINE:
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ iqk_cmd = 0x208 | (1 << (4 + path));
+ break;
+ case ID_FLOK_VBUFFER:
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_TXK:
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_RXAGC:
+ iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_RXK:
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_NBTXK:
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011);
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_NBRXK:
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ iqk_cmd = 0x608 | (1 << (4 + path));
+ break;
+ default:
+ return false;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
+ udelay(1);
+ fail = _iqk_check_cal(rtwdev, path);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ return fail;
+}
+
+static bool _rxk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool fail;
+ u8 gp;
+
+ for (gp = 0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
+ _g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
+ _g_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
+ _g_idxattc1[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
+ _a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
+ _a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
+ _a_idxattc1[gp]);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SET, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP_V1, gp);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF,
+ BIT(16 + gp + path * 4), fail);
+ kfail |= fail;
+ }
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
+
+ if (kfail) {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_RXCFIR, 0x0);
+ iqk_info->is_wb_rxiqk[path] = false;
+ } else {
+ iqk_info->nb_rxcfir[path] = 0x40000000;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_RXCFIR, 0x5);
+ iqk_info->is_wb_rxiqk[path] = true;
+ }
+
+ return kfail;
+}
+
+static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ const u8 gp = 0x3;
+ bool kfail = false;
+ bool fail;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
+ _g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
+ _g_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
+ _g_idxattc1[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
+ _a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
+ _a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
+ _a_idxattc1[gp]);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(1);
+
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
+ kfail |= fail;
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
+
+ if (!kfail)
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD) | 0x2;
+ else
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+
+ return kfail;
+}
+
+static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x0);
+ }
+}
+
+static bool _txk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool fail;
+ u8 gp;
+
+ for (gp = 0x0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _g_itqt[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _a_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _a_itqt[gp]);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SET, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF,
+ BIT(8 + gp + path * 4), fail);
+ kfail |= fail;
+ }
+
+ if (kfail) {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_TXCFIR, 0x0);
+ iqk_info->is_wb_txiqk[path] = false;
+ } else {
+ iqk_info->nb_txcfir[path] = 0x40000000;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_TXCFIR, 0x5);
+ iqk_info->is_wb_txiqk[path] = true;
+ }
+
+ return kfail;
+}
+
+static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail;
+ u8 gp = 0x3;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _g_itqt[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _a_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _a_itqt[gp]);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ kfail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+
+ if (!kfail)
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ else
+ iqk_info->nb_txcfir[path] = 0x40000002;
+
+ return kfail;
+}
+
+static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
+ else
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXVBUF, RR_TXVBUF_DACEN, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x7c = %x\n", path,
+ rtw89_read_rf(rtwdev, path, RR_TXVBUF, RFREG_MASK));
+}
+
+static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool is_fail1, is_fail2;
+ u32 vbuff_i;
+ u32 vbuff_q;
+ u32 core_i;
+ u32 core_q;
+ u32 tmp;
+ u8 ch;
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
+ core_i = FIELD_GET(RR_TXMO_COI, tmp);
+ core_q = FIELD_GET(RR_TXMO_COQ, tmp);
+ ch = (iqk_info->iqk_times / 2) % RTW89_IQK_CHS_NR;
+
+ if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
+ is_fail1 = true;
+ else
+ is_fail1 = false;
+
+ iqk_info->lok_idac[ch][path] = tmp;
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
+ vbuff_i = FIELD_GET(RR_LOKVB_COI, tmp);
+ vbuff_q = FIELD_GET(RR_LOKVB_COQ, tmp);
+
+ if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
+ is_fail2 = true;
+ else
+ is_fail2 = false;
+
+ iqk_info->lok_vbuf[ch][path] = tmp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, lok_idac[%x][%x] = 0x%x\n", path, ch, path,
+ iqk_info->lok_idac[ch][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, lok_vbuf[%x][%x] = 0x%x\n", path, ch, path,
+ iqk_info->lok_vbuf[ch][path]);
+
+ return is_fail1 | is_fail2;
+}
+
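+/* LO leakage calibration: coarse LOK, a vbuffer one-shot, fine LOK and a
+ * final vbuffer one-shot, then validate the resulting DAC codes via
+ * _lok_finetune_check().
+ */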
+static bool _iqk_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool tmp;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
+ break;
+ default:
+ break;
+ }
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
+ iqk_info->lok_cor_fail[0][path] = tmp;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
+ iqk_info->lok_fin_fail[0][path] = tmp;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
+ _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
+
+ return _lok_finetune_check(rtwdev, path);
+}
+
+static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW2, 0x00);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
+ udelay(1);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
+ udelay(1);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+}
+
+static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 tmp;
+ bool flag;
+
+ iqk_info->thermal[path] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ iqk_info->thermal_rek_en = false;
+
+ flag = iqk_info->lok_cor_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
+ flag = iqk_info->lok_fin_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
+ flag = iqk_info->iqk_tx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
+ flag = iqk_info->iqk_rx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
+ iqk_info->bp_iqkenable[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_txkresult[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_rxkresult[path] = tmp;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, iqk_info->iqk_times);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
+ if (tmp)
+ iqk_info->iqk_fail_cnt++;
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
+ iqk_info->iqk_fail_cnt);
+}
+
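+/* Per-path calibration flow: LOK (retried up to three times with an
+ * increasing ibias), then TX IQK and RX IQK, each run in narrow-band or
+ * group (wide-band) mode depending on is_nbiqk.
+ */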
+static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool lok_is_fail = false;
+ const int try = 3;
+ u8 ibias = 0x1;
+ u8 i;
+
+ _iqk_txclk_setting(rtwdev, path);
+
+ /* LOK */
+ for (i = 0; i < try; i++) {
+ _lok_res_table(rtwdev, path, ibias++);
+ _iqk_txk_setting(rtwdev, path);
+ lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
+ if (!lok_is_fail)
+ break;
+ }
+
+ if (lok_is_fail)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] LOK (%d) fail\n", path);
+
+ /* TXK */
+ if (iqk_info->is_nbiqk)
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+
+ /* RX */
+ _iqk_rxclk_setting(rtwdev, path);
+ _iqk_rxk_setting(rtwdev, path);
+ if (iqk_info->is_nbiqk)
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+
+ _iqk_info_iqk(rtwdev, phy_idx, path);
+}
+
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 reg_rf18;
+ u32 reg_35c;
+ u8 idx;
+ bool get_empty_table = false;
+
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
+ get_empty_table = true;
+ break;
+ }
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
+
+ if (!get_empty_table) {
+ idx = iqk_info->iqk_table_idx[path] + 1;
+ if (idx > 1)
+ idx = 0;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
+
+ reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
+
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
+ iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
+ iqk_info->iqk_table_idx[path] = idx;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
+ path, reg_rf18, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
+ path, reg_rf18);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
+ iqk_info->iqk_times, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
+ idx, path, iqk_info->iqk_mcc_ch[idx][path]);
+
+ if (reg_35c == 0x01)
+ iqk_info->syn1to2 = 0x1;
+ else
+ iqk_info->syn1to2 = 0x0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", path,
+ iqk_info->syn1to2);
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852B_IQK_VER);
+ /* 2GHz/5GHz/6GHz = 0/1/2 */
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
+ iqk_info->iqk_band[path]);
+ /* 20/40/80 = 0/1/2 */
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
+ iqk_info->iqk_bw[path]);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
+ iqk_info->iqk_ch[path]);
+}
+
+static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ _iqk_by_path(rtwdev, phy_idx, path);
+}
+
+static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_txcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_rxcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00000e19 + (path << 4));
+ fail = _iqk_check_cal(rtwdev, path);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s result =%x\n", __func__, fail);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS, B_IQK_RES_K, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+}
+
+static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ const struct rtw89_reg3_def *def;
+ int size;
+ u8 kpath;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
+
+ kpath = _kpath(rtwdev, phy_idx);
+
+ switch (kpath) {
+ case RF_A:
+ case RF_B:
+ return;
+ default:
+ size = ARRAY_SIZE(rtw8852b_restore_nondbcc_path01);
+ def = rtw8852b_restore_nondbcc_path01;
+ break;
+ }
+
+ for (i = 0; i < size; i++, def++)
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx;
+
+ idx = iqk_info->iqk_table_idx[path];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (3)idx = %x\n", idx);
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x54 = 0x%x\n", path, 1 << path,
+ rtw89_phy_read32_mask(rtwdev, R_CFIR_LUT + (path << 8), MASKDWORD));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x04 = 0x%x\n", path, 1 << path,
+ rtw89_phy_read32_mask(rtwdev, R_COEF_SEL + (path << 8), MASKDWORD));
+}
+
+static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ const struct rtw89_reg3_def *def;
+ int size;
+ u8 kpath;
+ int i;
+
+ kpath = _kpath(rtwdev, phy_idx);
+
+ switch (kpath) {
+ case RF_A:
+ case RF_B:
+ return;
+ default:
+ size = ARRAY_SIZE(rtw8852b_set_nondbcc_path01);
+ def = rtw8852b_set_nondbcc_path01;
+ break;
+ }
+
+ for (i = 0; i < size; i++, def++)
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx, path;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
+ if (iqk_info->is_iqk_init)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ iqk_info->is_iqk_init = true;
+ iqk_info->is_nbiqk = false;
+ iqk_info->iqk_fft_en = false;
+ iqk_info->iqk_sram_en = false;
+ iqk_info->iqk_cfir_en = false;
+ iqk_info->iqk_xym_en = false;
+ iqk_info->thermal_rek_en = false;
+ iqk_info->iqk_times = 0x0;
+
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ iqk_info->iqk_channel[idx] = 0x0;
+ for (path = 0; path < RTW8852B_IQK_SS; path++) {
+ iqk_info->lok_cor_fail[idx][path] = false;
+ iqk_info->lok_fin_fail[idx][path] = false;
+ iqk_info->iqk_tx_fail[idx][path] = false;
+ iqk_info->iqk_rx_fail[idx][path] = false;
+ iqk_info->iqk_mcc_ch[idx][path] = 0x0;
+ iqk_info->iqk_table_idx[path] = 0x0;
+ }
+ }
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
+ for (path = 0; path < RF_PATH_MAX; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
+ rf_mode != 2, 2, 5000, false,
+ rtwdev, path, RR_MOD, RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
+ }
+}
+
+static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
+ bool is_pause)
+{
+ if (!is_pause)
+ return;
+
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
+}
+
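+/* Run IQK for a single path: back up BB/RF registers, apply the MAC/BB
+ * calibration settings, execute the IQK, then restore everything. BTC is
+ * notified before and after the one-shot calibration.
+ */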
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]==========IQK strat!!!!!==========\n");
+ iqk_info->iqk_times++;
+ iqk_info->kcount = 0;
+ iqk_info->version = RTW8852B_IQK_VER;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+
+ _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
+ _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+ _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
+ _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+{
+ u8 kpath = _kpath(rtwdev, phy_idx);
+
+ switch (kpath) {
+ case RF_A:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ break;
+ case RF_B:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ case RF_AB:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+ u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
+ reg_bkup[path][i] =
+ rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
+static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+ const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
+ rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD,
+ reg_bkup[path][i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
+static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
+{
+ u8 order;
+ u8 val;
+
+ order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
+ val = 0x3 >> order;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
+
+ return val;
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 val, kidx = dpk->cur_idx[path];
+
+ val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
+ kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
+}
+
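+/* Issue a one-shot DPK command through the NCTL config register and poll the
+ * KIP report for completion; a timeout only logs a debug message, the
+ * command is not retried.
+ */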
+static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, enum rtw8852b_dpk_id id)
+{
+ u16 dpk_cmd;
+ u32 val;
+ int ret;
+
+ dpk_cmd = (id << 8) | (0x19 + (path << 4));
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 1, 20000, false,
+ rtwdev, 0xbff8, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");
+
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
+ 1, 2000, false,
+ rtwdev, 0x80fc, MASKLWORD);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot for %s = 0x%x\n",
+ id == 0x06 ? "LBK_RXIQK" :
+ id == 0x10 ? "SYNC" :
+ id == 0x11 ? "MDPK_IDL" :
+ id == 0x12 ? "MDPK_MPA" :
+ id == 0x13 ? "GAIN_LOSS" :
+ id == 0x14 ? "PWR_CAL" :
+ id == 0x15 ? "DPK_RXAGC" :
+ id == 0x16 ? "KIP_PRESET" :
+ id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC",
+ dpk_cmd);
+}
+
+static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
+ _set_rx_dck(rtwdev, phy, path);
+}
+
+static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
+ path, dpk->cur_idx[path], phy,
+ rtwdev->is_tssi_mode[path] ? "on" : "off",
+ rtwdev->dbcc_en ? "on" : "off",
+ dpk->bp[path][kidx].band == 0 ? "2G" :
+ dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
+ dpk->bp[path][kidx].ch,
+ dpk->bp[path][kidx].bw == 0 ? "20M" :
+ dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
+}
+
+static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
+}
+
+static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x0);
+ }
+}
+
+static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_pause)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, is_pause);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
+ is_pause ? "pause" : "resume");
+}
+
+static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl);
+
+ if (rtwdev->hal.cv > CHIP_CAV)
+ rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
+}
+
+static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ u8 cur_rxbb;
+ u32 tmp;
+
+ cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
+
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
+
+ if (cur_rxbb >= 0x11)
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
+ else if (cur_rxbb <= 0xa)
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
+ else
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);
+
+ rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
+ udelay(70);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);
+
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
+}
+
+static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+
+ udelay(200);
+
+ dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
+ dpk->bp[path][kidx].ther_dpk);
+}
+
+static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+ } else {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
+ rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
+ rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
+}
+
+static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bypass)
+{
+ if (is_bypass) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS, 0x1);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD));
+ } else {
+ rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
+ rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD));
+ }
+}
+
+static
+void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
+}
+
+static void _dpk_table_select(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ u8 val;
+
+ val = 0x80 + kidx * 0x20 + gain * 0x10;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
+ gain, val);
+}
+
+static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define DPK_SYNC_TH_DC_I 200
+#define DPK_SYNC_TH_DC_Q 200
+#define DPK_SYNC_TH_CORR 170
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u16 dc_i, dc_q;
+ u8 corr_val, corr_idx;
+
+ rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
+
+ corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
+ corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
+ path, corr_idx, corr_val);
+
+ dpk->corr_idx[path][kidx] = corr_idx;
+ dpk->corr_val[path][kidx] = corr_val;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
+
+ dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+ dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
+
+ dc_i = abs(sign_extend32(dc_i, 11));
+ dc_q = abs(sign_extend32(dc_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
+ path, dc_i, dc_q);
+
+ dpk->dc_i[path][kidx] = dc_i;
+ dpk->dc_q[path][kidx] = dc_q;
+
+ if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
+ corr_val < DPK_SYNC_TH_CORR)
+ return true;
+ else
+ return false;
+}
+
+static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, SYNC);
+
+ return _dpk_sync_check(rtwdev, path, kidx);
+}
+
+static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
+{
+ u16 dgain;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+ dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);
+
+ return dgain;
+}
+
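+/* Map the dgain readback onto an RXBB index offset using the bnd[]
+ * thresholds: large readings give positive offsets, small readings give
+ * negative offsets encoded as two's-complement s8 (0xff = -1 down to
+ * 0xf8 = -8).
+ */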
+static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
+{
+ static const u16 bnd[15] = {
+ 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
+ 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
+ };
+ s8 offset;
+
+ if (dgain >= bnd[0])
+ offset = 0x6;
+ else if (bnd[0] > dgain && dgain >= bnd[1])
+ offset = 0x6;
+ else if (bnd[1] > dgain && dgain >= bnd[2])
+ offset = 0x5;
+ else if (bnd[2] > dgain && dgain >= bnd[3])
+ offset = 0x4;
+ else if (bnd[3] > dgain && dgain >= bnd[4])
+ offset = 0x3;
+ else if (bnd[4] > dgain && dgain >= bnd[5])
+ offset = 0x2;
+ else if (bnd[5] > dgain && dgain >= bnd[6])
+ offset = 0x1;
+ else if (bnd[6] > dgain && dgain >= bnd[7])
+ offset = 0x0;
+ else if (bnd[7] > dgain && dgain >= bnd[8])
+ offset = 0xff;
+ else if (bnd[8] > dgain && dgain >= bnd[9])
+ offset = 0xfe;
+ else if (bnd[9] > dgain && dgain >= bnd[10])
+ offset = 0xfd;
+ else if (bnd[10] > dgain && dgain >= bnd[11])
+ offset = 0xfc;
+ else if (bnd[11] > dgain && dgain >= bnd[12])
+ offset = 0xfb;
+ else if (bnd[12] > dgain && dgain >= bnd[13])
+ offset = 0xfa;
+ else if (bnd[13] > dgain && dgain >= bnd[14])
+ offset = 0xf9;
+ else if (bnd[14] > dgain)
+ offset = 0xf8;
+ else
+ offset = 0x0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
+
+ return offset;
+}
+
+static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+
+ return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
+}
+
+static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_table_select(rtwdev, path, kidx, 1);
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+}
+
+static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_tpg_sel(rtwdev, path, kidx);
+ _dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
+}
+
+static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
+}
+
+static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 txagc)
+{
+ rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ _dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
+}
+
+static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ u32 tmp;
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ _dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n",
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1),
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB));
+}
+
+static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, s8 gain_offset)
+{
+ u8 txagc;
+
+ txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);
+
+ if (txagc - gain_offset < DPK_TXAGC_LOWER)
+ txagc = DPK_TXAGC_LOWER;
+ else if (txagc - gain_offset > DPK_TXAGC_UPPER)
+ txagc = DPK_TXAGC_UPPER;
+ else
+ txagc = txagc - gain_offset;
+
+ _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
+ gain_offset, txagc);
+ return txagc;
+}
+
+static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
+{
+ u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
+
+ if (is_check) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
+ val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val1_i = abs(sign_extend32(val1_i, 11));
+ val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val1_q = abs(sign_extend32(val1_q, 11));
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
+ val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val2_i = abs(sign_extend32(val2_i, 11));
+ val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val2_q = abs(sign_extend32(val2_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
+ phy_div(val1_i * val1_i + val1_q * val1_q,
+ val2_i * val2_i + val2_q * val2_q));
+ } else {
+ for (i = 0; i < 32; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+ }
+
+ if (val1_i * val1_i + val1_q * val1_q >=
+ (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
+ return true;
+
+ return false;
+}
+
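+/* DPK AGC state machine: start from SYNC/DGAIN, optionally adjust the RXBB
+ * index from the dgain mapping, then iterate gain-loss measurements while
+ * stepping TXAGC up or down. Gives up after six adjustment rounds (or the
+ * hard loop limit) and returns the final TXAGC, or 0xff if SYNC failed.
+ */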
+static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
+ bool loss_only)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 step = DPK_AGC_STEP_SYNC_DGAIN;
+ u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
+ u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
+ u16 dgain = 0;
+ s8 offset;
+ int limit = 200;
+
+ tmp_txagc = init_txagc;
+
+ do {
+ switch (step) {
+ case DPK_AGC_STEP_SYNC_DGAIN:
+ if (_dpk_sync(rtwdev, phy, path, kidx)) {
+ tmp_txagc = 0xff;
+ goout = 1;
+ break;
+ }
+
+ dgain = _dpk_dgain_read(rtwdev);
+
+ if (loss_only == 1 || limited_rxbb == 1)
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ else
+ step = DPK_AGC_STEP_GAIN_ADJ;
+ break;
+
+ case DPK_AGC_STEP_GAIN_ADJ:
+ tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD,
+ RFREG_MASKRXBB);
+ offset = _dpk_dgain_mapping(rtwdev, dgain);
+
+ if (tmp_rxbb + offset > 0x1f) {
+ tmp_rxbb = 0x1f;
+ limited_rxbb = 1;
+ } else if (tmp_rxbb + offset < 0) {
+ tmp_rxbb = 0;
+ limited_rxbb = 1;
+ } else {
+ tmp_rxbb = tmp_rxbb + offset;
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB,
+ tmp_rxbb);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
+ if (offset || agc_cnt == 0) {
+ if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
+ _dpk_bypass_rxcfir(rtwdev, path, true);
+ else
+ _dpk_lbk_rxiqk(rtwdev, phy, path);
+ }
+ if (dgain > 1922 || dgain < 342)
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ else
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GAIN_LOSS_IDX:
+ _dpk_gainloss(rtwdev, phy, path, kidx);
+ tmp_gl_idx = _dpk_gainloss_read(rtwdev);
+
+ if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
+ tmp_gl_idx >= 7)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else
+ step = DPK_AGC_STEP_SET_TX_GAIN;
+ break;
+
+ case DPK_AGC_STEP_GL_GT_CRITERION:
+ if (tmp_txagc == 0x2e) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@lower bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GL_LT_CRITERION:
+ if (tmp_txagc == 0x3f) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@upper bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+ case DPK_AGC_STEP_SET_TX_GAIN:
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx);
+ goout = 1;
+ agc_cnt++;
+ break;
+
+ default:
+ goout = 1;
+ break;
+ }
+ } while (!goout && agc_cnt < 6 && limit-- > 0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
+ tmp_rxbb);
+
+ return tmp_txagc;
+}
+
+static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
+{
+ switch (order) {
+ case 0:
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
+ break;
+ case 1:
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
+ rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
+ break;
+ case 2:
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
+ rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Wrong MDPD order!!(0x%x)\n", order);
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Set MDPD order to 0x%x for IDL\n", order);
+}
+
+static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
+ dpk->bp[path][kidx].band == RTW89_BAND_5G)
+ _dpk_set_mdpd_para(rtwdev, 0x2);
+ else
+ _dpk_set_mdpd_para(rtwdev, 0x0);
+
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+}
+
+static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ const u16 pwsf = 0x78;
+ u8 gs = dpk->dpk_gs[phy];
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_MDPD, kidx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
+ pwsf, gs);
+
+ dpk->bp[path][kidx].txagc_dpk = txagc;
+ rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
+ 0x3F << ((gain << 3) + (kidx << 4)), txagc);
+
+ dpk->bp[path][kidx].pwsf = pwsf;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x1FF << (gain << 4), pwsf);
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
+
+ dpk->bp[path][kidx].gs = gs;
+ if (dpk->dpk_gs[phy] == 0x7f)
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x007f7f7f);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x005b5b5b);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_ORDER_V1, _dpk_order_convert(rtwdev));
+ rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
+}
+
+static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ bool is_reload = false;
+ u8 idx, cur_band, cur_ch;
+
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
+
+ for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
+ if (cur_band != dpk->bp[path][idx].band ||
+ cur_ch != dpk->bp[path][idx].ch)
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_MDPD, idx);
+ dpk->cur_idx[path] = idx;
+ is_reload = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] reload S%d[%d] success\n", path, idx);
+ }
+
+ return is_reload;
+}
+
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 txagc = 0x38, kidx = dpk->cur_idx[path];
+ bool is_fail = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
+
+ _rfk_rf_direct_cntrl(rtwdev, path, false);
+ _rfk_drf_direct_cntrl(rtwdev, path, false);
+
+ _dpk_kip_pwr_clk_on(rtwdev, path);
+ _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+ _dpk_rf_setting(rtwdev, gain, path, kidx);
+ _dpk_rx_dck(rtwdev, phy, path);
+
+ _dpk_kip_preset(rtwdev, phy, path, kidx);
+ _dpk_kip_set_rxagc(rtwdev, phy, path);
+ _dpk_table_select(rtwdev, path, kidx, gain);
+
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);
+
+ if (txagc == 0xff) {
+ is_fail = true;
+ } else {
+ _dpk_get_thermal(rtwdev, kidx, path);
+
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
+ }
+
+ if (!is_fail)
+ dpk->bp[path][kidx].path_ok = true;
+ else
+ dpk->bp[path][kidx].path_ok = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
+ is_fail ? "Check" : "Success");
+
+ return is_fail;
+}
+
+static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy, u8 kpath)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
+ u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {};
+ u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR];
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {};
+ u8 path;
+
+ if (dpk->is_dpk_reload_en) {
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ if (!reloaded[path] && dpk->bp[path][0].ch)
+ dpk->cur_idx[path] = !dpk->cur_idx[path];
+ else
+ _dpk_onoff(rtwdev, path, false);
+ }
+ } else {
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
+ dpk->cur_idx[path] = 0;
+ }
+
+ _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
+
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+ _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ _dpk_information(rtwdev, phy, path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, true);
+ }
+
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+ is_fail = _dpk_main(rtwdev, phy, path, 1);
+ _dpk_onoff(rtwdev, path, is_fail);
+ }
+
+ _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
+ _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
+
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+ _dpk_kip_restore(rtwdev, path);
+ _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, false);
+ }
+}
+
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_fem_info *fem = &rtwdev->fem;
+
+ if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+ if (kpath & BIT(path))
+ _dpk_onoff(rtwdev, path, true);
+ }
+}
+
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
+ RTW8852B_DPK_VER, rtwdev->hal.cv,
+ RTW8852B_RF_REL_VERSION);
+
+ if (_dpk_bypass_check(rtwdev, phy))
+ _dpk_force_bypass(rtwdev, phy);
+ else
+ _dpk_cal_select(rtwdev, force, phy, RF_AB);
+}
+
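+/* DPK thermal tracking: compare the current thermal reading with the value
+ * sampled at calibration time and rewrite the power scaling factor (pwsf)
+ * accordingly; in TSSI mode the TX AGC offsets are folded into the delta.
+ * The pwsf update is skipped when tracking is disabled or TXAGC reads 0.
+ */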
+static void _dpk_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
+ s8 delta_ther[2] = {};
+ u8 trk_idx, txagc_rf;
+ u8 path, kidx;
+ u16 pwsf[2];
+ u8 cur_ther;
+ u32 tmp;
+
+ for (path = 0; path < RF_PATH_NUM_8852B; path++) {
+ kidx = dpk->cur_idx[path];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
+ path, kidx, dpk->bp[path][kidx].ch);
+
+ cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] thermal now = %d\n", cur_ther);
+
+ if (dpk->bp[path][kidx].ch && cur_ther)
+ delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
+ delta_ther[path] = delta_ther[path] * 3 / 2;
+ else
+ delta_ther[path] = delta_ther[path] * 5 / 2;
+
+ txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ 0x0000003f);
+
+ if (rtwdev->is_tssi_mode[path]) {
+ trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
+ txagc_rf, trk_idx);
+
+ txagc_bb =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ MASKBYTE2);
+ txagc_bb_tp =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
+ B_TXAGC_TP);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
+ txagc_bb_tp, txagc_bb);
+
+ txagc_ofst =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ MASKBYTE3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
+ txagc_ofst, delta_ther[path]);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
+ B_DPD_COM_OF);
+ if (tmp == 0x1) {
+ txagc_ofst = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] HW txagc offset mode\n");
+ }
+
+ if (txagc_rf && cur_ther)
+ ini_diff = txagc_ofst + (delta_ther[path]);
+
+ tmp = rtw89_phy_read32_mask(rtwdev,
+ R_P0_TXDPD + (path << 13),
+ B_P0_TXDPD);
+ if (tmp == 0x0) {
+ pwsf[0] = dpk->bp[path][kidx].pwsf +
+ txagc_bb_tp - txagc_bb + ini_diff;
+ pwsf[1] = dpk->bp[path][kidx].pwsf +
+ txagc_bb_tp - txagc_bb + ini_diff;
+ } else {
+ pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
+ pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
+ }
+
+ } else {
+ pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ }
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
+ if (!tmp && txagc_rf) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
+ pwsf[0], pwsf[1]);
+
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_BND + (path << 8) + (kidx << 2),
+ B_DPD_BND_0, pwsf[0]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_BND + (path << 8) + (kidx << 2),
+ B_DPD_BND_1, pwsf[1]);
+ }
+ }
+}
+
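+/* When the OFDM backoff plus TX scale reaches 44, the DPD backoff is moved
+ * into the BB by using a 0 dB DPD gain scale (gs = 0x7f); otherwise the
+ * default gs = 0x5b is kept.
+ */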
+static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 tx_scale, ofdm_bkof, path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
+ tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
+
+ if (ofdm_bkof + tx_scale >= 44) {
+ /* move dpd backoff to bb, and set dpd backoff to 0 */
+ dpk->dpk_gs[phy] = 0x7f;
+ for (path = 0; path < RF_PATH_NUM_8852B; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
+ B_DPD_CFG, 0x7f7f7f);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Set S%d DPD backoff to 0dB\n", path);
+ }
+ } else {
+ dpk->dpk_gs[phy] = 0x5b;
+ }
+}
+
+static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
+}
+
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852b_tssi_sys_a_defs_2g_tbl,
+ &rtw8852b_tssi_sys_a_defs_5g_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852b_tssi_sys_b_defs_2g_tbl,
+ &rtw8852b_tssi_sys_b_defs_5g_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_tssi_init_txpwr_defs_a_tbl,
+ &rtw8852b_tssi_init_txpwr_defs_b_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl,
+ &rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl);
+}
+
+static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_tssi_dck_defs_a_tbl,
+ &rtw8852b_tssi_dck_defs_b_tbl);
+}
+
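+/* Build the 64-entry thermal offset table around the current thermal reading
+ * using the per-band delta swing tables (negative offsets in the lower half,
+ * positive in the upper half). A reading of 0xff means no valid thermal
+ * sample; the center is then forced to 32 and the table is zeroed.
+ */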
+static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+#define RTW8852B_TSSI_GET_VAL(ptr, idx) \
+({ \
+ s8 *__ptr = (ptr); \
+ u8 __idx = (idx), __i, __v; \
+ u32 __val = 0; \
+ for (__i = 0; __i < 4; __i++) { \
+ __v = (__ptr[__idx + __i]); \
+ __val |= (__v << (8 * __i)); \
+ } \
+ __val; \
+})
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
+ const s8 *thm_up_a = NULL;
+ const s8 *thm_down_a = NULL;
+ const s8 *thm_up_b = NULL;
+ const s8 *thm_down_b = NULL;
+ u8 thermal = 0xff;
+ s8 thm_ofst[64] = {0};
+ u32 tmp = 0;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_n;
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[2];
+ break;
+ }
+
+ if (path == RF_PATH_A) {
+ thermal = tssi_info->thermal[RF_PATH_A];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ R_P0_TSSI_BASE + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_a[i++] :
+ -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_a[i++] :
+ thm_up_a[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
+
+ } else {
+ thermal = tssi_info->thermal[RF_PATH_B];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_b[i++] :
+ -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_b[i++] :
+ thm_up_b[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
+ }
+#undef RTW8852B_TSSI_GET_VAL
+}
+
+static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_tssi_dac_gain_defs_a_tbl,
+ &rtw8852b_tssi_dac_gain_defs_b_tbl);
+}
+
+static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852b_tssi_slope_a_defs_2g_tbl,
+ &rtw8852b_tssi_slope_a_defs_5g_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852b_tssi_slope_b_defs_2g_tbl,
+ &rtw8852b_tssi_slope_b_defs_5g_tbl);
+}
+
+static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, bool all)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ const struct rtw89_rfk_tbl *tbl = NULL;
+ u8 ch = chan->channel;
+
+ if (path == RF_PATH_A) {
+ if (band == RTW89_BAND_2G) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_a_2g_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_a_2g_part_defs_tbl;
+ } else if (ch >= 36 && ch <= 64) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_a_5g1_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_a_5g1_part_defs_tbl;
+ } else if (ch >= 100 && ch <= 144) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_a_5g2_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_a_5g2_part_defs_tbl;
+ } else if (ch >= 149 && ch <= 177) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_a_5g3_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_a_5g3_part_defs_tbl;
+ }
+ } else {
+ if (ch >= 1 && ch <= 14) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_b_2g_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_b_2g_part_defs_tbl;
+ } else if (ch >= 36 && ch <= 64) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_b_5g1_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_b_5g1_part_defs_tbl;
+ } else if (ch >= 100 && ch <= 144) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_b_5g2_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_b_5g2_part_defs_tbl;
+ } else if (ch >= 149 && ch <= 177) {
+ if (all)
+ tbl = &rtw8852b_tssi_align_b_5g3_all_defs_tbl;
+ else
+ tbl = &rtw8852b_tssi_align_b_5g3_part_defs_tbl;
+ }
+ }
+
+ if (tbl)
+ rtw89_rfk_parser(rtwdev, tbl);
+}
+
+static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852b_tssi_slope_defs_a_tbl,
+ &rtw8852b_tssi_slope_defs_b_tbl);
+}
+
+static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
+}
+
+static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
+ path);
+
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_MIX, 0x010);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_RFCTM_DEL, 0x010);
+}
+
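+/* Editor's note (not in the original patch): enable TSSI tracking on both
+ * paths - clear the moving-average state, toggle the TSSI enable and
+ * offset-enable bits, turn on RF TX gain tracking, and mark the paths as
+ * running in TSSI mode.
+ */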
+static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852B; i++) {
+ _tssi_set_tssi_track(rtwdev, phy, i);
+ _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
+
+ if (i == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
+ B_P0_TSSI_MV_CLR, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
+ B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
+ B_P0_TSSI_EN, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
+ RR_TXGA_V1_TRK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_RFC, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT_EN, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = true;
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
+ B_P1_TSSI_MV_CLR, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
+ B_P1_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
+ B_P1_TSSI_EN, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
+ RR_TXGA_V1_TRK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_RFC, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT_EN, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_B] = true;
+ }
+ }
+}
+
+static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+}
+
+static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
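+/* Editor's note (not in the original patch): channels that fall between two
+ * calibrated OFDM groups are tagged with TSSI_EXTRA_GROUP_BIT; their DE is
+ * later taken as the average of the two neighbouring group entries (see
+ * _tssi_get_ofdm_de()).
+ */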
+#define TSSI_EXTRA_GROUP_BIT (BIT(31))
+#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
+#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
+static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u32 gidx, gidx_1st, gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ s8 val;
+
+ gidx = _tssi_get_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u32 tgidx, tgidx_1st, tgidx_2nd;
+ s8 tde_1st;
+ s8 tde_2nd;
+ s8 val;
+
+ tgidx = _tssi_get_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
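+/* Editor's note (not in the original patch): program the per-path TSSI DE
+ * registers from efuse calibration data - the CCK DE comes from the CCK group
+ * table and the OFDM/MCS DE from the MCS group table, each with the
+ * per-trim-group offset added on top.
+ */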
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 gidx;
+ s8 ofdm_de;
+ s8 trim_de;
+ s32 val;
+ u32 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
+ gidx = _tssi_get_cck_group(rtwdev, ch);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = tssi_info->tssi_cck[i][gidx] + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
+ i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_cck_long[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
+ _TSSI_DE_MASK));
+
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = ofdm_de + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
+ i, ofdm_de, trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_mcs_20m[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
+ _TSSI_DE_MASK));
+ }
+}
+
+static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
+ "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
+ R_TSSI_PA_K1 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD),
+ R_TSSI_PA_K2 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD),
+ R_P0_TSSI_ALIM1 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD),
+ R_P0_TSSI_ALIM3 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD),
+ R_TSSI_PA_K5 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD),
+ R_P0_TSSI_ALIM2 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD),
+ R_P0_TSSI_ALIM4 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD),
+ R_TSSI_PA_K8 + (path << 13),
+ rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD));
+}
+
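+/* Editor's note (not in the original patch): restore the TSSI alignment
+ * values cached for the current band by an earlier _tssi_alimentk() run, if
+ * any, then dump the resulting registers.
+ */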
+static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 channel = chan->channel;
+ u8 band;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s phy=%d path=%d\n", __func__, phy, path);
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ if (tssi_info->alignment_done[path][band]) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][0]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][1]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][2]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][3]);
+ }
+
+ _tssi_alimentk_dump_result(rtwdev, path);
+}
+
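+/* Editor's note (not in the original patch): start or stop the PMAC packet TX
+ * used for alignment calibration. When enabling, the PLCP, TX/RX paths and TX
+ * power are configured before @cnt packets are sent with the given @period.
+ */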
+static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
+ u8 enable)
+{
+ enum rtw89_rf_path_bit rx_path;
+
+ if (path == RF_PATH_A)
+ rx_path = RF_A;
+ else if (path == RF_PATH_B)
+ rx_path = RF_B;
+ else if (path == RF_PATH_AB)
+ rx_path = RF_AB;
+ else
+ rx_path = RF_ABCD; /* don't change path, but still set others */
+
+ if (enable) {
+ rtw8852b_bb_set_plcp_tx(rtwdev);
+ rtw8852b_bb_cfg_tx_path(rtwdev, path);
+ rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy);
+ }
+
+ rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+}
+
+static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, const u32 reg[],
+ u32 reg_backup[], u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
+ reg_backup[i]);
+ }
+}
+
+static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, const u32 reg[],
+ u32 reg_backup[], u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
+ reg_backup[i]);
+ }
+}
+
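+/* Editor's note (not in the original patch): map a channel number to the
+ * per-channel alignment cache index - 2 GHz channels 1-14 use indices 0-13,
+ * and the 5 GHz channels (2-channel spacing) follow contiguously after them.
+ */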
+static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+{
+ u8 channel_index;
+
+ if (channel >= 1 && channel <= 14)
+ channel_index = channel - 1;
+ else if (channel >= 36 && channel <= 64)
+ channel_index = (channel - 36) / 2 + 14;
+ else if (channel >= 100 && channel <= 144)
+ channel_index = ((channel - 100) / 2) + 15 + 14;
+ else if (channel >= 149 && channel <= 177)
+ channel_index = ((channel - 149) / 2) + 38 + 14;
+ else
+ channel_index = 0;
+
+ return channel_index;
+}
+
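+/* Editor's note (not in the original patch): for each calibration power
+ * level, trigger a short hardware TX burst and poll for the TSSI codeword
+ * report; returns false if a report never becomes ready within the retry
+ * budget.
+ */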
+static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, const s16 *power,
+ u32 *tssi_cw_rpt)
+{
+ u32 tx_counter, tx_counter_tmp;
+ const int retry = 100;
+ u32 tmp;
+ int j, k;
+
+ for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
+ rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);
+
+ tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
+ _tssi_trigger[path], tmp, path);
+
+ if (j == 0)
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ else
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+
+ tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] First HWTXcounter=%d path=%d\n",
+ tx_counter_tmp, path);
+
+ for (k = 0; k < retry; k++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
+ B_TSSI_CWRPT_RDY);
+ if (tmp)
+ break;
+
+ udelay(30);
+
+ tx_counter_tmp =
+ rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
+ k, tx_counter_tmp, path);
+ }
+
+ if (k >= retry) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
+ k, path);
+
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ return false;
+ }
+
+ tssi_cw_rpt[j] =
+ rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);
+
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+
+ tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
+ tx_counter_tmp, path);
+ }
+
+ return true;
+}
+
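+/* Editor's note (not in the original patch): TSSI alignment calibration. If
+ * results for this channel were cached by a previous run they are simply
+ * reloaded. Otherwise BB state is backed up, hardware TX at fixed power
+ * levels is used to collect TSSI codeword reports, alignment offsets are
+ * derived relative to the default codewords and written back, and the results
+ * are cached per band and per channel before the BB state is restored.
+ */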
+static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
+ 0x78e4, 0x49c0, 0x0d18, 0x0d80};
+ static const s16 power_2g[4] = {48, 20, 4, 4};
+ static const s16 power_5g[4] = {48, 20, 4, 4};
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
+ u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
+ u8 channel = chan->channel;
+ u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
+ struct rtw8852b_bb_tssi_bak tssi_bak;
+ s32 aliment_diff, tssi_cw_default;
+ u32 start_time, finish_time;
+ u32 bb_reg_backup[8] = {0};
+ const s16 *power;
+ u8 band;
+ bool ok;
+ u32 tmp;
+ u8 j;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======> %s channel=%d path=%d\n", __func__, channel,
+ path);
+
+ if (tssi_info->check_backup_aligmk[path][ch_idx]) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
+ tssi_info->alignment_backup_by_ch[path][ch_idx][0]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
+ tssi_info->alignment_backup_by_ch[path][ch_idx][1]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
+ tssi_info->alignment_backup_by_ch[path][ch_idx][2]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
+ tssi_info->alignment_backup_by_ch[path][ch_idx][3]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======> %s Reload TSSI Alignment !!!\n", __func__);
+ _tssi_alimentk_dump_result(rtwdev, path);
+ return;
+ }
+
+ start_time = ktime_get_ns();
+
+ if (chan->band_type == RTW89_BAND_2G)
+ power = power_2g;
+ else
+ power = power_5g;
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak);
+ _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
+
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ if (!ok)
+ goto out;
+
+ for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
+ power[j], j, tssi_cw_rpt[j]);
+ }
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
+ _tssi_cw_default_mask[1]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
+ tssi_cw_rpt[1] + tssi_cw_default;
+ aliment_diff = tssi_alim_offset_1 - tssi_cw_default;
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
+ _tssi_cw_default_mask[2]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_2 = tssi_cw_default + aliment_diff;
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
+ _tssi_cw_default_mask[3]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_3 = tssi_cw_default + aliment_diff;
+
+ if (path == RF_PATH_A) {
+ tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
+ FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
+ FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
+ } else {
+ tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
+ FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
+ FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
+ }
+
+ tssi_info->alignment_done[path][band] = true;
+ tssi_info->alignment_value[path][band][0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][1] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][2] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][3] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
+
+ tssi_info->check_backup_aligmk[path][ch_idx] = true;
+ tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM1 + (path << 13),
+ tssi_info->alignment_value[path][band][0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM3 + (path << 13),
+ tssi_info->alignment_value[path][band][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM2 + (path << 13),
+ tssi_info->alignment_value[path][band][2]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM4 + (path << 13),
+ tssi_info->alignment_value[path][band][3]);
+
+out:
+ _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
+ rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak);
+ rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0);
+
+ finish_time = ktime_get_ns();
+ tssi_info->tssi_alimk_time += finish_time - start_time;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] %s processing time = %d ms\n", __func__,
+ tssi_info->tssi_alimk_time);
+}
+
+void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
+{
+ _set_dpd_backoff(rtwdev, RTW89_PHY_0);
+}
+
+void rtw8852b_rck(struct rtw89_dev *rtwdev)
+{
+ u8 path;
+
+ for (path = 0; path < RF_PATH_NUM_8852B; path++)
+ _rck(rtwdev, path);
+}
+
+void rtw8852b_dack(struct rtw89_dev *rtwdev)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
+ _dac_cal(rtwdev, false);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
+}
+
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _iqk_init(rtwdev);
+ _iqk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _rx_dck(rtwdev, phy_idx);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
+}
+
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ rtwdev->dpk.is_dpk_enable = true;
+ rtwdev->dpk.is_dpk_reload_en = false;
+ _dpk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
+}
+
+void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
+{
+ _dpk_track(rtwdev);
+}
+
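+/* Editor's note (not in the original patch): full TSSI initialisation for
+ * both RF paths - disable tracking, program the per-path RF/BB tables (sys,
+ * TX power control, DCK, thermal meter, DAC gain, slope, alignment),
+ * optionally run the hardware-TX alignment calibration with scheduled TX
+ * paused, then re-enable TSSI and apply the efuse DE values.
+ */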
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
+ u32 tx_en;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_dac_gain_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_set_tssi_slope(rtwdev, phy, i);
+
+ rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _tmac_tx_pause(rtwdev, phy, true);
+ if (hwtx_en)
+ _tssi_alimentk(rtwdev, phy, i);
+ _tmac_tx_pause(rtwdev, phy, false);
+ rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
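+/* Editor's note (not in the original patch): lightweight TSSI reconfiguration
+ * for channel switches during scan - only the per-path RF/sys/thermal-meter
+ * tables are reprogrammed, and cached alignment results are restored instead
+ * of recalibrated.
+ */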
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 channel = chan->channel;
+ u8 band;
+ u32 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s phy=%d channel=%d\n", __func__, phy, channel);
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+
+ if (tssi_info->alignment_done[i][band])
+ _tssi_alimentk_done(rtwdev, phy, i);
+ else
+ _tssi_alignment_default(rtwdev, phy, i, true);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
+static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, bool enable)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 channel = chan->channel;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
+ __func__, channel);
+
+ if (enable) {
+ if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
+ rtw8852b_tssi(rtwdev, phy, true);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
+ __func__,
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
+
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
+ __func__,
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======> %s SCAN_END\n", __func__);
+}
+
+void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (scan_start)
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
+ else
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
+}
+
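+/* Editor's note (not in the original patch): program the bandwidth field of
+ * RF register 0x18; @dav selects between the two copies of the channel
+ * register (RR_CFGCH vs RR_CFGCH_V1).
+ */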
+static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw89_bandwidth bw, bool dav)
+{
+ u32 rf_reg18;
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ if (rf_reg18 == INV_RF_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+ rf_reg18 &= ~RR_CFGCH_BW;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
+ }
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
+ bw, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
+static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ _bw_setting(rtwdev, RF_PATH_A, bw, true);
+ _bw_setting(rtwdev, RF_PATH_B, bw, true);
+ _bw_setting(rtwdev, RF_PATH_A, bw, false);
+ _bw_setting(rtwdev, RF_PATH_B, bw, false);
+}
+
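+/* Editor's note (not in the original patch): write RF 0x18 on path A and wait
+ * for the synthesizer busy flag to clear; returns true if the poll timed out.
+ */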
+static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
+{
+ u32 bak;
+ u32 tmp;
+ int ret;
+
+ bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
+ false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);
+
+ return !!ret;
+}
+
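+/* Editor's note (not in the original patch): LCK recovery after a channel
+ * write - if the synthesizer has not locked, try in turn an MMD reset, a
+ * rewrite of RF 0x18, and finally a SYN power off/on followed by another
+ * RF 0x18 rewrite.
+ */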
+static void _lck_check(struct rtw89_dev *rtwdev)
+{
+ u32 tmp;
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
+ }
+
+ udelay(10);
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ }
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");
+
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
+ }
+}
+
+static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
+{
+ bool timeout;
+
+ timeout = _set_s0_arfc18(rtwdev, val);
+ if (!timeout)
+ _lck_check(rtwdev);
+}
+
+static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 central_ch, bool dav)
+{
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+ bool is_2g_ch = central_ch <= 14;
+ u32 rf_reg18;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
+ RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
+
+ if (!is_2g_ch)
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
+ FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+
+ if (path == RF_PATH_A && dav)
+ _set_ch(rtwdev, rf_reg18);
+ else
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
+ central_ch, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
+static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
+{
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, true);
+ _ch_setting(rtwdev, RF_PATH_B, central_ch, true);
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, false);
+ _ch_setting(rtwdev, RF_PATH_B, central_ch, false);
+}
+
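+/* Editor's note (not in the original patch): select the RX baseband filter
+ * bandwidth by writing a per-bandwidth code to RR_LUTWD0_LB through the RF
+ * LUT write-enable window.
+ */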
+static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
+
+ if (bw == RTW89_CHANNEL_WIDTH_20)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
+ else if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
+ else if (bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
+ else
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
+ rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
+}
+
+static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ u8 kpath, path;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RF_PATH_NUM_8852B; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ _set_rxbb_bw(rtwdev, bw, path);
+ }
+}
+
+static void rtw8852b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 central_ch,
+ enum rtw89_band band, enum rtw89_bandwidth bw)
+{
+ _ctrl_ch(rtwdev, central_ch);
+ _ctrl_bw(rtwdev, phy, bw);
+ _rxbb_bw(rtwdev, phy, bw);
+}
+
+void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
new file mode 100644
index 000000000000..f52832065600
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852B_RFK_H__
+#define __RTW89_8852B_RFK_H__
+
+#include "core.h"
+
+void rtw8852b_rck(struct rtw89_dev *rtwdev);
+void rtw8852b_dack(struct rtw89_dev *rtwdev);
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852b_dpk_init(struct rtw89_dev *rtwdev);
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_dpk_track(struct rtw89_dev *rtwdev);
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx);
+void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.c
new file mode 100644
index 000000000000..0b8a210bb10b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.c
@@ -0,0 +1,794 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "rtw8852b_rfk_table.h"
+
+static const struct rtw89_reg5_def rtw8852b_afe_init_defs[] = {
+ RTW89_DECL_RFK_WM(0xC0D4, 0xffffffff, 0x4486888c),
+ RTW89_DECL_RFK_WM(0xC0D8, 0xffffffff, 0xc6ba10e0),
+ RTW89_DECL_RFK_WM(0xc0dc, 0xffffffff, 0x30c52868),
+ RTW89_DECL_RFK_WM(0xc0e0, 0xffffffff, 0x05008128),
+ RTW89_DECL_RFK_WM(0xc0e4, 0xffffffff, 0x0000272b),
+ RTW89_DECL_RFK_WM(0xC1D4, 0xffffffff, 0x4486888c),
+ RTW89_DECL_RFK_WM(0xC1D8, 0xffffffff, 0xc6ba10e0),
+ RTW89_DECL_RFK_WM(0xc1dc, 0xffffffff, 0x30c52868),
+ RTW89_DECL_RFK_WM(0xc1e0, 0xffffffff, 0x05008128),
+ RTW89_DECL_RFK_WM(0xc1e4, 0xffffffff, 0x0000272b),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_afe_init_defs);
+
+static const struct rtw89_reg5_def rtw8852b_check_addc_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x20f4, BIT(24), 0x0),
+ RTW89_DECL_RFK_WM(0x20f8, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x20f0, 0xff0000, 0x1),
+ RTW89_DECL_RFK_WM(0x20f0, 0xf00, 0x2),
+ RTW89_DECL_RFK_WM(0x20f0, 0xf, 0x0),
+ RTW89_DECL_RFK_WM(0x20f0, 0xc0, 0x2),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_check_addc_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_check_addc_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x20f4, BIT(24), 0x0),
+ RTW89_DECL_RFK_WM(0x20f8, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x20f0, 0xff0000, 0x1),
+ RTW89_DECL_RFK_WM(0x20f0, 0xf00, 0x2),
+ RTW89_DECL_RFK_WM(0x20f0, 0xf, 0x0),
+ RTW89_DECL_RFK_WM(0x20f0, 0xc0, 0x3),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_check_addc_defs_b);
+
+static const struct rtw89_reg5_def rtw8852b_check_dadc_en_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x032C, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0x030C, 0x0f000000, 0xf),
+ RTW89_DECL_RFK_WM(0x030C, 0x0f000000, 0x3),
+ RTW89_DECL_RFK_WM(0x032C, BIT(16), 0x0),
+ RTW89_DECL_RFK_WM(0x12dc, BIT(0), 0x1),
+ RTW89_DECL_RFK_WM(0x12e8, BIT(2), 0x1),
+ RTW89_DECL_RFK_WRF(RF_PATH_A, 0x8f, BIT(13), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_check_dadc_en_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_check_dadc_en_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x032C, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0x030C, 0x0f000000, 0xf),
+ RTW89_DECL_RFK_WM(0x030C, 0x0f000000, 0x3),
+ RTW89_DECL_RFK_WM(0x032C, BIT(16), 0x0),
+ RTW89_DECL_RFK_WM(0x32dc, BIT(0), 0x1),
+ RTW89_DECL_RFK_WM(0x32e8, BIT(2), 0x1),
+ RTW89_DECL_RFK_WRF(RF_PATH_B, 0x8f, BIT(13), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_check_dadc_en_defs_b);
+
+static const struct rtw89_reg5_def rtw8852b_check_dadc_dis_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x12dc, BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0x12e8, BIT(2), 0x0),
+ RTW89_DECL_RFK_WRF(RF_PATH_A, 0x8f, BIT(13), 0x0),
+ RTW89_DECL_RFK_WM(0x032C, BIT(16), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_check_dadc_dis_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_check_dadc_dis_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x32dc, BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0x32e8, BIT(2), 0x0),
+ RTW89_DECL_RFK_WRF(RF_PATH_B, 0x8f, BIT(13), 0x0),
+ RTW89_DECL_RFK_WM(0x032C, BIT(16), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_check_dadc_dis_defs_b);
+
+static const struct rtw89_reg5_def rtw8852b_dack_s0_1_defs[] = {
+ RTW89_DECL_RFK_WM(0x12A0, BIT(15), 0x1),
+ RTW89_DECL_RFK_WM(0x12A0, 0x00007000, 0x3),
+ RTW89_DECL_RFK_WM(0x12B8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x030C, BIT(28), 0x1),
+ RTW89_DECL_RFK_WM(0x032C, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0xC0D8, BIT(16), 0x1),
+ RTW89_DECL_RFK_WM(0xc0dc, 0x0c000000, 0x3),
+ RTW89_DECL_RFK_WM(0xC004, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0xc024, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0xC004, 0x3ff00000, 0x30),
+ RTW89_DECL_RFK_WM(0xC004, 0xc0000000, 0x0),
+ RTW89_DECL_RFK_WM(0xC004, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc024, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc00c, BIT(2), 0x0),
+ RTW89_DECL_RFK_WM(0xc02c, BIT(2), 0x0),
+ RTW89_DECL_RFK_WM(0xC004, BIT(0), 0x1),
+ RTW89_DECL_RFK_WM(0xc024, BIT(0), 0x1),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dack_s0_1_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dack_s0_2_defs[] = {
+ RTW89_DECL_RFK_WM(0xc0dc, 0x0c000000, 0x0),
+ RTW89_DECL_RFK_WM(0xc00c, BIT(2), 0x1),
+ RTW89_DECL_RFK_WM(0xc02c, BIT(2), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dack_s0_2_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dack_s0_3_defs[] = {
+ RTW89_DECL_RFK_WM(0xC004, BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0xc024, BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0xC0D8, BIT(16), 0x0),
+ RTW89_DECL_RFK_WM(0x12A0, BIT(15), 0x0),
+ RTW89_DECL_RFK_WM(0x12A0, 0x00007000, 0x7),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dack_s0_3_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dack_s1_1_defs[] = {
+ RTW89_DECL_RFK_WM(0x32a0, BIT(15), 0x1),
+ RTW89_DECL_RFK_WM(0x32a0, 0x7000, 0x3),
+ RTW89_DECL_RFK_WM(0x32B8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x030C, BIT(28), 0x1),
+ RTW89_DECL_RFK_WM(0x032C, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0xC1D8, BIT(16), 0x1),
+ RTW89_DECL_RFK_WM(0xc1dc, 0x0c000000, 0x3),
+ RTW89_DECL_RFK_WM(0xc104, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0xc124, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0xc104, 0x3ff00000, 0x30),
+ RTW89_DECL_RFK_WM(0xc104, 0xc0000000, 0x0),
+ RTW89_DECL_RFK_WM(0xc104, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc124, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc10c, BIT(2), 0x0),
+ RTW89_DECL_RFK_WM(0xc12c, BIT(2), 0x0),
+ RTW89_DECL_RFK_WM(0xc104, BIT(0), 0x1),
+ RTW89_DECL_RFK_WM(0xc124, BIT(0), 0x1),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dack_s1_1_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dack_s1_2_defs[] = {
+ RTW89_DECL_RFK_WM(0xc1dc, 0x0c000000, 0x0),
+ RTW89_DECL_RFK_WM(0xc10c, BIT(2), 0x1),
+ RTW89_DECL_RFK_WM(0xc12c, BIT(2), 0x1),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dack_s1_2_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dack_s1_3_defs[] = {
+ RTW89_DECL_RFK_WM(0xc104, BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0xc124, BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0xC1D8, BIT(16), 0x0),
+ RTW89_DECL_RFK_WM(0x32a0, BIT(15), 0x0),
+ RTW89_DECL_RFK_WM(0x32a0, 0x7000, 0x7),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dack_s1_3_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dpk_afe_defs[] = {
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x0303),
+ RTW89_DECL_RFK_WM(0x12b8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x32b8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x13),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0041),
+ RTW89_DECL_RFK_WM(0x12b8, BIT(28), 0x1),
+ RTW89_DECL_RFK_WM(0x58c8, BIT(24), 0x1),
+ RTW89_DECL_RFK_WM(0x78c8, BIT(24), 0x1),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x3),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x3),
+ RTW89_DECL_RFK_WM(0x2008, 0x01FFFFFF, 0x1ffffff),
+ RTW89_DECL_RFK_WM(0x0c1c, BIT(2), 0x1),
+ RTW89_DECL_RFK_WM(0x0700, BIT(27), 0x1),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003FF, 0x3ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x3),
+ RTW89_DECL_RFK_WM(0x0c6c, BIT(0), 0x1),
+ RTW89_DECL_RFK_WM(0x58ac, BIT(27), 0x1),
+ RTW89_DECL_RFK_WM(0x78ac, BIT(27), 0x1),
+ RTW89_DECL_RFK_WM(0x0c3c, BIT(9), 0x1),
+ RTW89_DECL_RFK_WM(0x2344, BIT(31), 0x1),
+ RTW89_DECL_RFK_WM(0x4490, BIT(31), 0x1),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0xbf),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000f0000, 0xb),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x5),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x3333),
+ RTW89_DECL_RFK_WM(0x580c, BIT(15), 0x1),
+ RTW89_DECL_RFK_WM(0x5800, 0x0000ffff, 0x0000),
+ RTW89_DECL_RFK_WM(0x780c, BIT(15), 0x1),
+ RTW89_DECL_RFK_WM(0x7800, 0x0000ffff, 0x0000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dpk_afe_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dpk_afe_restore_defs[] = {
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x0303),
+ RTW89_DECL_RFK_WM(0x12b8, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0x32b8, BIT(30), 0x0),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x2008, 0x01FFFFFF, 0x0),
+ RTW89_DECL_RFK_WM(0x0c1c, BIT(2), 0x0),
+ RTW89_DECL_RFK_WM(0x0700, BIT(27), 0x0),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003FF, 0x63),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000FF000, 0x00),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000FF000, 0x00),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5864, BIT(29), 0x0),
+ RTW89_DECL_RFK_WM(0x7864, BIT(29), 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x0000),
+ RTW89_DECL_RFK_WM(0x58c8, BIT(24), 0x0),
+ RTW89_DECL_RFK_WM(0x78c8, BIT(24), 0x0),
+ RTW89_DECL_RFK_WM(0x0c3c, BIT(9), 0x0),
+ RTW89_DECL_RFK_WM(0x580c, BIT(15), 0x0),
+ RTW89_DECL_RFK_WM(0x58e4, 0x18000000, 0x1),
+ RTW89_DECL_RFK_WM(0x58e4, 0x18000000, 0x2),
+ RTW89_DECL_RFK_WM(0x780c, BIT(15), 0x0),
+ RTW89_DECL_RFK_WM(0x78e4, 0x18000000, 0x1),
+ RTW89_DECL_RFK_WM(0x78e4, 0x18000000, 0x2),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dpk_afe_restore_defs);
+
+static const struct rtw89_reg5_def rtw8852b_dpk_kip_defs[] = {
+ RTW89_DECL_RFK_WM(0x8008, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x8088, 0xffffffff, 0x80000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_dpk_kip_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_sys_defs[] = {
+ RTW89_DECL_RFK_WM(0x12a8, 0x0000000f, 0x5),
+ RTW89_DECL_RFK_WM(0x32a8, 0x0000000f, 0x5),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0x5555),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0x5555),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16),
+ RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x19),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_sys_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_sys_a_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_sys_a_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_sys_a_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_sys_a_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_sys_b_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_sys_b_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_sys_b_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_sys_b_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_init_txpwr_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x002d000),
+ RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x1dc80280),
+ RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00002080),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0x800),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_init_txpwr_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_init_txpwr_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x002d000),
+ RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x1dc80280),
+ RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00002080),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0x800),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_init_txpwr_defs_b);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_init_txpwr_he_tb_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_init_txpwr_he_tb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_init_txpwr_he_tb_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_init_txpwr_he_tb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_dck_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x0ef),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_dck_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_dck_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x0ef),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_dck_defs_b);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_dac_gain_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000400, 0x1),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x000),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_dac_gain_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_dac_gain_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x000),
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_dac_gain_defs_b);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_slope_a_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0804008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_slope_a_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_slope_a_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081e08),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_slope_a_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_slope_b_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0804008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_slope_b_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_slope_b_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081e08),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_slope_b_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_2g_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01ef27af),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000075),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x017f13ae),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x0000006e),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_2g_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_2g_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01ef27af),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000075),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x017f13ae),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x0000006e),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_2g_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_5g1_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x016037e7),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x0000006f),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_5g1_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_5g1_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x016037e7),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x0000006f),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_5g1_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_5g2_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01f053f1),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000070),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_5g2_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_5g2_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01f053f1),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000070),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_5g2_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_5g3_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01c047ee),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000070),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_5g3_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_a_5g3_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01c047ee),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000070),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_a_5g3_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_2g_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01ff2bb5),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000078),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x018f2bb0),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000072),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_2g_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_2g_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01ff2bb5),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000078),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x018f2bb0),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000072),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_2g_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_5g1_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x009003da),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_5g1_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_5g1_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x009003da),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_5g1_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_5g2_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x013027e6),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_5g2_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_5g2_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x013027e6),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_5g2_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_5g3_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x009003da),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_5g3_all_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_align_b_5g3_part_defs[] = {
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x009003da),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_align_b_5g3_part_defs);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_slope_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852b_tssi_slope_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852b_tssi_slope_defs_b);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.h
new file mode 100644
index 000000000000..b4d6e9851ff9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852B_RFK_TABLE_H__
+#define __RTW89_8852B_RFK_TABLE_H__
+
+#include "phy.h"
+
+extern const struct rtw89_rfk_tbl rtw8852b_afe_init_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_check_addc_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_check_addc_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_check_dadc_en_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_check_dadc_en_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_check_dadc_dis_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_check_dadc_dis_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dack_s0_1_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dack_s0_2_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dack_s0_3_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dack_s1_1_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dack_s1_2_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dack_s1_3_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dpk_afe_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dpk_afe_restore_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_dpk_kip_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_sys_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_sys_a_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_sys_a_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_sys_b_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_sys_b_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_init_txpwr_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_init_txpwr_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_dck_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_dck_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_dac_gain_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_dac_gain_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_slope_a_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_slope_a_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_slope_b_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_slope_b_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_2g_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_2g_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_5g1_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_5g1_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_5g2_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_5g2_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_5g3_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_a_5g3_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_2g_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_2g_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_5g1_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_5g1_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_5g2_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_5g2_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_5g3_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_align_b_5g3_part_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852b_tssi_slope_defs_b_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
new file mode 100644
index 000000000000..a6734965361f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
@@ -0,0 +1,22877 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852b_table.h"
+
+static const struct rtw89_reg2_def rtw89_8852b_phy_bb_regs[] = {
+ {0x704, 0x601E0100},
+ {0x4000, 0x00000000},
+ {0x4004, 0xCA014000},
+ {0x4008, 0xC751D4F0},
+ {0x400C, 0x44511475},
+ {0x4010, 0x00000000},
+ {0x4014, 0x00000000},
+ {0x4018, 0x4F4C084B},
+ {0x401C, 0x084A4E52},
+ {0x4020, 0x4D504E4B},
+ {0x4024, 0x4F4C0849},
+ {0x4028, 0x08484C50},
+ {0x402C, 0x4C50504C},
+ {0x4030, 0x5454084A},
+ {0x4034, 0x084B5654},
+ {0x4038, 0x6A6C605A},
+ {0x403C, 0x4C4C084C},
+ {0x4040, 0x084B4E4D},
+ {0x4044, 0x4E4C4B4B},
+ {0x4048, 0x4B4B084A},
+ {0x404C, 0x084A4E4C},
+ {0x4050, 0x514F4C4A},
+ {0x4054, 0x524E084A},
+ {0x4058, 0x084A5154},
+ {0x405C, 0x53555554},
+ {0x4060, 0x45450845},
+ {0x4064, 0x08454144},
+ {0x4068, 0x40434445},
+ {0x406C, 0x44450845},
+ {0x4070, 0x08444043},
+ {0x4074, 0x42434444},
+ {0x4078, 0x46450844},
+ {0x407C, 0x08444843},
+ {0x4080, 0x4B4E4A47},
+ {0x4084, 0x4F4C084B},
+ {0x4088, 0x084A4E52},
+ {0x408C, 0x4D504E4B},
+ {0x4090, 0x4F4C0849},
+ {0x4094, 0x08484C50},
+ {0x4098, 0x4C50504C},
+ {0x409C, 0x5454084A},
+ {0x40A0, 0x084B5654},
+ {0x40A4, 0x6A6C605A},
+ {0x40A8, 0x4C4C084C},
+ {0x40AC, 0x084B4E4D},
+ {0x40B0, 0x4E4C4B4B},
+ {0x40B4, 0x4B4B084A},
+ {0x40B8, 0x084A4E4C},
+ {0x40BC, 0x514F4C4A},
+ {0x40C0, 0x524E084A},
+ {0x40C4, 0x084A5154},
+ {0x40C8, 0x53555554},
+ {0x40CC, 0x45450845},
+ {0x40D0, 0x08454144},
+ {0x40D4, 0x40434445},
+ {0x40D8, 0x44450845},
+ {0x40DC, 0x08444043},
+ {0x40E0, 0x42434444},
+ {0x40E4, 0x46450844},
+ {0x40E8, 0x08444843},
+ {0x40EC, 0x4B4E4A47},
+ {0x40F0, 0x00000000},
+ {0x40F4, 0x00000006},
+ {0x40F8, 0x00000000},
+ {0x40FC, 0x8C30C30C},
+ {0x4100, 0x4C30C30C},
+ {0x4104, 0x0C30C30C},
+ {0x4108, 0x0C30C30C},
+ {0x410C, 0x0C30C30C},
+ {0x4110, 0x0C30C30C},
+ {0x4114, 0x28A28A28},
+ {0x4118, 0x28A28A28},
+ {0x411C, 0x28A28A28},
+ {0x4120, 0x28A28A28},
+ {0x4124, 0x28A28A28},
+ {0x4128, 0x28A28A28},
+ {0x412C, 0x06666666},
+ {0x4130, 0x33333333},
+ {0x4134, 0x33333333},
+ {0x4138, 0x33333333},
+ {0x413C, 0x00000031},
+ {0x4140, 0x5100600A},
+ {0x4144, 0x18363113},
+ {0x4148, 0x1D976DDC},
+ {0x414C, 0x1C072DD7},
+ {0x4150, 0x1127CDF4},
+ {0x4154, 0x1E37BDF1},
+ {0x4158, 0x1FB7F1D6},
+ {0x415C, 0x1EA7DDF9},
+ {0x4160, 0x1FE445DD},
+ {0x4164, 0x1F97F1FE},
+ {0x4168, 0x1FF781ED},
+ {0x416C, 0x1FA7F5FE},
+ {0x4170, 0x1E07B913},
+ {0x4174, 0x1FD7FDFF},
+ {0x4178, 0x1E17B9FA},
+ {0x417C, 0x19A66914},
+ {0x4180, 0x10F65598},
+ {0x4184, 0x14A5A111},
+ {0x4188, 0x1D3765DB},
+ {0x418C, 0x17C685CA},
+ {0x4190, 0x1107C5F3},
+ {0x4194, 0x1B5785EB},
+ {0x4198, 0x1F97ED8F},
+ {0x419C, 0x1BC7A5F3},
+ {0x41A0, 0x1FE43595},
+ {0x41A4, 0x1EB7D9FC},
+ {0x41A8, 0x1FE65DBE},
+ {0x41AC, 0x1EC7D9FC},
+ {0x41B0, 0x1976FCFF},
+ {0x41B4, 0x1F77F5FF},
+ {0x41B8, 0x1976FDEC},
+ {0x41BC, 0x198664EF},
+ {0x41C0, 0x11062D93},
+ {0x41C4, 0x10C4E910},
+ {0x41C8, 0x1CA759DB},
+ {0x41CC, 0x1335A9B5},
+ {0x41D0, 0x1097B9F3},
+ {0x41D4, 0x17B72DE1},
+ {0x41D8, 0x1F67ED42},
+ {0x41DC, 0x18074DE9},
+ {0x41E0, 0x1FD40547},
+ {0x41E4, 0x1D57ADF9},
+ {0x41E8, 0x1FE52182},
+ {0x41EC, 0x1D67B1F9},
+ {0x41F0, 0x14860CE1},
+ {0x41F4, 0x1EC7E9FE},
+ {0x41F8, 0x14860DD6},
+ {0x41FC, 0x195664C7},
+ {0x4200, 0x0005E58A},
+ {0x4204, 0x00000000},
+ {0x4208, 0x00000000},
+ {0x420C, 0x7A000000},
+ {0x4210, 0x0F9F3D7A},
+ {0x4214, 0x0040817C},
+ {0x4218, 0x00E10204},
+ {0x421C, 0x227D94CD},
+ {0x4220, 0x08028A28},
+ {0x4224, 0x00000210},
+ {0x4228, 0x04688000},
+ {0x4A48, 0x00000002},
+ {0x422C, 0x0060B002},
+ {0x4230, 0x9A8249A8},
+ {0x4234, 0x26A1469E},
+ {0x4238, 0x2099A824},
+ {0x423C, 0x2359461C},
+ {0x4240, 0x1631A675},
+ {0x4244, 0x2C6B1D63},
+ {0x4248, 0x0000000E},
+ {0x424C, 0x00000001},
+ {0x4250, 0x00000001},
+ {0x4254, 0x00000000},
+ {0x4258, 0x00000000},
+ {0x425C, 0x00000000},
+ {0x4260, 0x0020000C},
+ {0x4264, 0x00000000},
+ {0x4268, 0x00000000},
+ {0x426C, 0x0418317C},
+ {0x4270, 0x2B33135C},
+ {0x4274, 0x00000002},
+ {0x4278, 0x00000000},
+ {0x427C, 0x00000000},
+ {0x4280, 0x00000000},
+ {0x4284, 0x00000000},
+ {0x4288, 0x00000000},
+ {0x428C, 0x00000000},
+ {0x4290, 0x00000000},
+ {0x4294, 0x00000000},
+ {0x4298, 0x00000000},
+ {0x429C, 0x84026000},
+ {0x42A0, 0x0051AC20},
+ {0x4A24, 0x0010C040},
+ {0x42A4, 0x02024008},
+ {0x42A8, 0x00000000},
+ {0x42AC, 0x00000000},
+ {0x42B0, 0x22CE803C},
+ {0x42B4, 0x32000000},
+ {0x42B8, 0x996FD67D},
+ {0x42BC, 0xBD67D67D},
+ {0x42C0, 0x7D67D65B},
+ {0x42C4, 0x28029F59},
+ {0x42C8, 0x00280280},
+ {0x42CC, 0x00000000},
+ {0x42D0, 0x00000000},
+ {0x42D4, 0x00000003},
+ {0x42D8, 0x00000001},
+ {0x42DC, 0x61861800},
+ {0x42E0, 0x830C30C3},
+ {0x42E4, 0xC30C30C3},
+ {0x42E8, 0x830C30C3},
+ {0x42EC, 0x451450C3},
+ {0x42F0, 0x05145145},
+ {0x42F4, 0x05145145},
+ {0x42F8, 0x05145145},
+ {0x42FC, 0x0F0C3145},
+ {0x4300, 0x030C30CF},
+ {0x4304, 0x030C30C3},
+ {0x4308, 0x030CF3C3},
+ {0x430C, 0x030C30C3},
+ {0x4310, 0x0F3CF3C3},
+ {0x4314, 0x0F3CF3CF},
+ {0x4318, 0x0F3CF3CF},
+ {0x431C, 0x0F3CF3CF},
+ {0x4320, 0x0F3CF3CF},
+ {0x4324, 0x030C10C3},
+ {0x4328, 0x051430C3},
+ {0x432C, 0x051490CB},
+ {0x4330, 0x030CD151},
+ {0x4334, 0x050C50C7},
+ {0x4338, 0x051492CB},
+ {0x433C, 0x05145145},
+ {0x4340, 0x05145145},
+ {0x4344, 0x05145145},
+ {0x4348, 0x05145145},
+ {0x434C, 0x090CD3CF},
+ {0x4350, 0x071491C5},
+ {0x4354, 0x073CF143},
+ {0x4358, 0x071431C3},
+ {0x435C, 0x0F3CF1C5},
+ {0x4360, 0x0F3CF3CF},
+ {0x4364, 0x0F3CF3CF},
+ {0x4368, 0x0F3CF3CF},
+ {0x436C, 0x0F3CF3CF},
+ {0x4370, 0x090C91CF},
+ {0x4374, 0x11243143},
+ {0x4378, 0x9777A777},
+ {0x437C, 0xBB7BAC95},
+ {0x4380, 0xB667B889},
+ {0x4384, 0x7B9B8899},
+ {0x4388, 0x7A5567C8},
+ {0x438C, 0x2278CCCC},
+ {0x4390, 0x7C222222},
+ {0x4394, 0x0000069B},
+ {0x4398, 0x001CCCCC},
+ {0x4AAC, 0xCCCCC88C},
+ {0x4AB0, 0x0000AACC},
+ {0x439C, 0x00000000},
+ {0x43A0, 0x00000008},
+ {0x43A4, 0x00000000},
+ {0x43A8, 0x00000000},
+ {0x43AC, 0x00000000},
+ {0x43B0, 0x10000000},
+ {0x43B4, 0x00401001},
+ {0x43B8, 0x00061003},
+ {0x43BC, 0x000024D8},
+ {0x43C0, 0x00000000},
+ {0x43C4, 0x10000020},
+ {0x43C8, 0x20000200},
+ {0x43CC, 0x00000000},
+ {0x43D0, 0x04000000},
+ {0x43D4, 0x44000100},
+ {0x43D8, 0x60804060},
+ {0x43DC, 0x44204210},
+ {0x43E0, 0x82108082},
+ {0x43E4, 0x82108402},
+ {0x43E8, 0xC8082108},
+ {0x43EC, 0xC8202084},
+ {0x43F0, 0x44208208},
+ {0x43F4, 0x84108204},
+ {0x43F8, 0xD0108104},
+ {0x43FC, 0xF8210108},
+ {0x4400, 0x6431E930},
+ {0x4404, 0x02309468},
+ {0x4408, 0x10C61C22},
+ {0x440C, 0x02109469},
+ {0x4410, 0x10C61C22},
+ {0x4414, 0x00041049},
+ {0x4A4C, 0x00060581},
+ {0x4418, 0x00000000},
+ {0x441C, 0x00000000},
+ {0x4420, 0x6C000000},
+ {0x4424, 0xB0200020},
+ {0x4428, 0x00001FF0},
+ {0x442C, 0x00000000},
+ {0x4430, 0x00000000},
+ {0x4434, 0x00000000},
+ {0x4438, 0x00000000},
+ {0x443C, 0x190642D0},
+ {0x4440, 0xA80668A0},
+ {0x4444, 0x60900820},
+ {0x4448, 0x9F28518C},
+ {0x444C, 0x32488A62},
+ {0x4450, 0x9C6E36DC},
+ {0x4454, 0x0000F52B},
+ {0x4458, 0x00000000},
+ {0x445C, 0x4801442E},
+ {0x4460, 0x0051A0B8},
+ {0x4464, 0x00000000},
+ {0x4468, 0x00000000},
+ {0x446C, 0x00000000},
+ {0x4470, 0x00000000},
+ {0x4474, 0x00000000},
+ {0x4478, 0x00000000},
+ {0x447C, 0x00000000},
+ {0x4480, 0x2A0A6040},
+ {0x4484, 0x0A0A6829},
+ {0x4488, 0x00000004},
+ {0x448C, 0x00000000},
+ {0x4490, 0x80000000},
+ {0x4494, 0x10000000},
+ {0x4498, 0xE0000000},
+ {0x4AB4, 0x00000000},
+ {0x449C, 0x0000001E},
+ {0x44A0, 0x02B2C3A6},
+ {0x44A4, 0x00000400},
+ {0x44A8, 0x00000001},
+ {0x44AC, 0x000190C0},
+ {0x44B0, 0x00000000},
+ {0x44B4, 0x00000000},
+ {0x44B8, 0x00000000},
+ {0x44BC, 0x00000000},
+ {0x44C0, 0x00000000},
+ {0x44C4, 0x00000000},
+ {0x44C8, 0x00000000},
+ {0x44CC, 0x00000000},
+ {0x44D0, 0x00000000},
+ {0x44D4, 0x00000000},
+ {0x44D8, 0x00000000},
+ {0x44DC, 0x00000000},
+ {0x44E0, 0x00000000},
+ {0x44E4, 0x00000000},
+ {0x44E8, 0x00000000},
+ {0x44EC, 0x00000000},
+ {0x44F0, 0x00000000},
+ {0x44F4, 0x00000000},
+ {0x44F8, 0x00000000},
+ {0x44FC, 0x00000000},
+ {0x4500, 0x00000000},
+ {0x4504, 0x00000000},
+ {0x4508, 0x00000000},
+ {0x450C, 0x00000000},
+ {0x4510, 0x00000000},
+ {0x4514, 0x00000000},
+ {0x4518, 0x00000000},
+ {0x451C, 0x00000000},
+ {0x4520, 0x00000000},
+ {0x4524, 0x00000000},
+ {0x4528, 0x00000000},
+ {0x452C, 0x00000000},
+ {0x4530, 0x4E830171},
+ {0x4534, 0x00000870},
+ {0x4538, 0x000000FF},
+ {0x453C, 0x00000000},
+ {0x4540, 0x00000000},
+ {0x4544, 0x00000000},
+ {0x4548, 0x00000000},
+ {0x454C, 0x00000000},
+ {0x4550, 0x00000000},
+ {0x4554, 0x00000000},
+ {0x4558, 0x00000000},
+ {0x455C, 0x00000000},
+ {0x4560, 0x40000000},
+ {0x4564, 0x40000000},
+ {0x4568, 0x00000000},
+ {0x456C, 0x20000000},
+ {0x4570, 0x04F040BB},
+ {0x4574, 0x000E53FF},
+ {0x4578, 0x000205CB},
+ {0x457C, 0x00200000},
+ {0x4580, 0x00000040},
+ {0x4584, 0x00000000},
+ {0x4588, 0x00000017},
+ {0x458C, 0x30000000},
+ {0x4590, 0x00000000},
+ {0x4594, 0x00000000},
+ {0x4598, 0x00000001},
+ {0x459C, 0x0003FE00},
+ {0x45A0, 0x00000086},
+ {0x45A4, 0x00000000},
+ {0x45A8, 0xC00001C0},
+ {0x45AC, 0x78038000},
+ {0x45B0, 0x8000004A},
+ {0x45B4, 0x04094800},
+ {0x45B8, 0x00280002},
+ {0x45BC, 0x06748790},
+ {0x45C0, 0x80000000},
+ {0x45C4, 0x00000000},
+ {0x45C8, 0x00000000},
+ {0x45CC, 0x00558670},
+ {0x45D0, 0x002883F0},
+ {0x45D4, 0x00090120},
+ {0x45D8, 0x00000000},
+ {0x45E0, 0xA3A6D3C4},
+ {0x45E4, 0xAB27B126},
+ {0x45E8, 0x00006778},
+ {0x45F4, 0x000001B5},
+ {0x45EC, 0x11110F0A},
+ {0x45F0, 0x00000003},
+ {0x4A0C, 0x0000000A},
+ {0x45F8, 0x0058BC3F},
+ {0x45FC, 0x00000003},
+ {0x462C, 0x00000020},
+ {0x4600, 0x000003D9},
+ {0x45F0, 0x00000004},
+ {0x4604, 0x002B1CB0},
+ {0x4A50, 0xC0000000},
+ {0x4A54, 0x00001000},
+ {0x4A58, 0x00000000},
+ {0x4A18, 0x00000024},
+ {0x4608, 0x00000001},
+ {0x460C, 0x00000000},
+ {0x4A10, 0x00000001},
+ {0x4610, 0x00000001},
+ {0x4614, 0x16E5298F},
+ {0x4618, 0x18C6294A},
+ {0x461C, 0x0E06318A},
+ {0x4620, 0x0E539CE5},
+ {0x4624, 0x00019287},
+ {0x4A14, 0x000000BF},
+ {0x4628, 0x00000001},
+ {0x4630, 0x000001AA},
+ {0x4A18, 0x00001900},
+ {0x4A1C, 0x000002A6},
+ {0x4634, 0x000000A3},
+ {0x4A20, 0x00000086},
+ {0x4638, 0x01986456},
+ {0x49F8, 0x00000000},
+ {0x463C, 0x00000000},
+ {0x4640, 0x00000000},
+ {0x4644, 0x00C8CC00},
+ {0x4648, 0xC400B6B6},
+ {0x464C, 0xDC400FC0},
+ {0x4A8C, 0x00000110},
+ {0x4650, 0x08882550},
+ {0x4654, 0x08CC2660},
+ {0x4658, 0x09102660},
+ {0x465C, 0x00000154},
+ {0x45DC, 0xC39E38E8},
+ {0x4660, 0x452607E6},
+ {0x4664, 0x6750DC65},
+ {0x4668, 0xF3F0F1ED},
+ {0x466C, 0x30141506},
+ {0x4670, 0x2C2B2B2B},
+ {0x4674, 0x2C2C2C2C},
+ {0x4678, 0xDDB738E8},
+ {0x467C, 0x543618FB},
+ {0x4680, 0x4F31DC6F},
+ {0x4684, 0xFBEBDA00},
+ {0x4688, 0x1A10FF04},
+ {0x468C, 0x282A3000},
+ {0x4690, 0x2A29292A},
+ {0x4694, 0x04FA2A2A},
+ {0x4698, 0xEE0F04D1},
+ {0x469C, 0x99E91436},
+ {0x46A0, 0x0701E79E},
+ {0x46A4, 0x08D77CFF},
+ {0x46A8, 0x2212FF14},
+ {0x46AC, 0x60322437},
+ {0x46B0, 0x63666666},
+ {0x46B4, 0x35374425},
+ {0x46B8, 0x35883042},
+ {0x46BC, 0x5177C252},
+ {0x4720, 0x7FFFFD63},
+ {0x4724, 0xB58D11FF},
+ {0x4728, 0x07FFFFFF},
+ {0x472C, 0x0E7893B6},
+ {0x4730, 0xE0391201},
+ {0x4734, 0x00000020},
+ {0x4738, 0x8325C500},
+ {0x473C, 0x00000B7F},
+ {0x46C0, 0x00000000},
+ {0x46C4, 0x00000000},
+ {0x46C8, 0x00000219},
+ {0x46CC, 0x00000000},
+ {0x46D0, 0x00000000},
+ {0x46D4, 0x00000001},
+ {0x46D8, 0x00000001},
+ {0x46DC, 0x00000000},
+ {0x46E0, 0x00000000},
+ {0x46E4, 0x00000151},
+ {0x46E8, 0x00000498},
+ {0x46EC, 0x00000498},
+ {0x46F0, 0x00000000},
+ {0x46F4, 0x00000000},
+ {0x46F8, 0x00001146},
+ {0x46FC, 0x00000000},
+ {0x4700, 0x00000000},
+ {0x4704, 0x00C8CC00},
+ {0x4708, 0xC400B6B6},
+ {0x470C, 0xDC400FC0},
+ {0x4A90, 0x00000110},
+ {0x4710, 0x08882550},
+ {0x4714, 0x08CC2660},
+ {0x4718, 0x09102660},
+ {0x471C, 0x00000154},
+ {0x4740, 0xC69F38E8},
+ {0x4744, 0x462709E9},
+ {0x4748, 0x6750DC67},
+ {0x474C, 0xF3F0F1ED},
+ {0x4750, 0x30141506},
+ {0x4754, 0x2C2B2B2B},
+ {0x4758, 0x2C2C2C2C},
+ {0x475C, 0xE0B738E8},
+ {0x4760, 0x52381BFE},
+ {0x4764, 0x5031DC6C},
+ {0x4768, 0xFBEBDA00},
+ {0x476C, 0x1A10FF04},
+ {0x4770, 0x282A3000},
+ {0x4774, 0x2A29292A},
+ {0x4778, 0x04FA2A2A},
+ {0x477C, 0xEE0F04D1},
+ {0x49F0, 0x99E91436},
+ {0x49F4, 0x0701E79E},
+ {0x49FC, 0x08D77CFF},
+ {0x4A5C, 0x2212FF14},
+ {0x4A60, 0x60322437},
+ {0x4A64, 0x63666666},
+ {0x4A68, 0x35374425},
+ {0x4A6C, 0x35883042},
+ {0x4A70, 0x5177C252},
+ {0x4A74, 0x7FFFFD63},
+ {0x4A78, 0xB58D11FF},
+ {0x4A7C, 0x07FFFFFF},
+ {0x4A80, 0x0E7893B6},
+ {0x4A9C, 0xE0391201},
+ {0x4AA0, 0x00000020},
+ {0x4AA4, 0x8325C500},
+ {0x4AA8, 0x00000B7F},
+ {0x4780, 0x00000000},
+ {0x4784, 0x00000000},
+ {0x4788, 0x00000219},
+ {0x478C, 0x00000000},
+ {0x4790, 0x00000000},
+ {0x4794, 0x00000001},
+ {0x4798, 0x00000001},
+ {0x479C, 0x00000000},
+ {0x47A0, 0x00000000},
+ {0x47A4, 0x00000151},
+ {0x47A8, 0x00000498},
+ {0x47AC, 0x00000498},
+ {0x47B0, 0x00000000},
+ {0x47B4, 0x00000000},
+ {0x47B8, 0x00001146},
+ {0x47BC, 0x00000002},
+ {0x47C0, 0x00000002},
+ {0x47C4, 0x00000000},
+ {0x47C8, 0xA32103FE},
+ {0x47CC, 0xB20A5328},
+ {0x47D0, 0xC686314F},
+ {0x47D4, 0x000005D7},
+ {0x47D8, 0x009B902A},
+ {0x47DC, 0x009B902A},
+ {0x47E0, 0x98682C18},
+ {0x47E4, 0x6308C4C1},
+ {0x47E8, 0x6248C631},
+ {0x47EC, 0x922A8253},
+ {0x47F0, 0x00000005},
+ {0x47F4, 0x00001759},
+ {0x47F8, 0x4BB02000},
+ {0x47FC, 0x831408BE},
+ {0x4A84, 0x000000E9},
+ {0x4800, 0x9ABBCACB},
+ {0x4804, 0x56767578},
+ {0x4808, 0xBCCBBB13},
+ {0x480C, 0x7889989B},
+ {0x4810, 0xBBB0F455},
+ {0x4814, 0x777BBBBB},
+ {0x4818, 0x15277777},
+ {0x481C, 0x27039CE9},
+ {0x4820, 0x42424432},
+ {0x4824, 0x36058342},
+ {0x4828, 0x00000006},
+ {0x482C, 0x00000005},
+ {0x4830, 0x00000005},
+ {0x4834, 0xC7013016},
+ {0x4838, 0x84413016},
+ {0x483C, 0x84413016},
+ {0x4840, 0x8C413016},
+ {0x4844, 0x8C40B028},
+ {0x4848, 0x3140B028},
+ {0x484C, 0x2940B028},
+ {0x4850, 0x8440B028},
+ {0x4854, 0x2318C610},
+ {0x4858, 0x45344753},
+ {0x485C, 0x236A6A88},
+ {0x4860, 0xAC8DF814},
+ {0x4864, 0x08877ACB},
+ {0x4868, 0x000107AA},
+ {0x4A94, 0x00000000},
+ {0x486C, 0xBCEB4A14},
+ {0x4870, 0x000A3A4A},
+ {0x4874, 0xBCEB4A14},
+ {0x4878, 0x000A3A4A},
+ {0x487C, 0xBCBDBD85},
+ {0x4880, 0x0CABB99A},
+ {0x4884, 0x38384242},
+ {0x4888, 0x0086102E},
+ {0x488C, 0xCA24C82A},
+ {0x4890, 0x00008A62},
+ {0x4894, 0x00000008},
+ {0x4898, 0x009B902A},
+ {0x489C, 0x009B902A},
+ {0x48A0, 0x98682C18},
+ {0x48A4, 0x6308C4C1},
+ {0x48A8, 0x6248C631},
+ {0x48AC, 0x922A8253},
+ {0x48B0, 0x00000005},
+ {0x48B4, 0x00001759},
+ {0x48B8, 0x4BA02000},
+ {0x48BC, 0x831408BE},
+ {0x4A88, 0x000000E9},
+ {0x48C0, 0x9898A8BB},
+ {0x48C4, 0x54535368},
+ {0x48C8, 0x99999B13},
+ {0x48CC, 0x55555899},
+ {0x48D0, 0xBBB07453},
+ {0x48D4, 0x777BBBBB},
+ {0x48D8, 0x15277777},
+ {0x48DC, 0x27039CE9},
+ {0x48E0, 0x31413432},
+ {0x48E4, 0x36058342},
+ {0x48E8, 0x00000006},
+ {0x48EC, 0x00000005},
+ {0x48F0, 0x00000005},
+ {0x48F4, 0xC7013016},
+ {0x48F8, 0x84413016},
+ {0x48FC, 0x84413016},
+ {0x4900, 0x8C413016},
+ {0x4904, 0x8C40B028},
+ {0x4908, 0x3140B028},
+ {0x490C, 0x2940B028},
+ {0x4910, 0x8440B028},
+ {0x4914, 0x2318C610},
+ {0x4918, 0x45334753},
+ {0x491C, 0x236A6A88},
+ {0x4920, 0xAC8DF814},
+ {0x4924, 0x08877ACB},
+ {0x4928, 0x000007AA},
+ {0x4A98, 0x00000000},
+ {0x492C, 0xBCEB4A14},
+ {0x4930, 0x000A3A4A},
+ {0x4934, 0xBCEB4A14},
+ {0x4938, 0x000A3A4A},
+ {0x493C, 0x9A8A8A85},
+ {0x4940, 0x0CA3B99A},
+ {0x4944, 0x38384242},
+ {0x4948, 0x8086102E},
+ {0x494C, 0xCA24C82A},
+ {0x4950, 0x00008A62},
+ {0x4954, 0x00000008},
+ {0x4958, 0x80040000},
+ {0x495C, 0x80040000},
+ {0x4960, 0xFE800000},
+ {0x4964, 0x834C0000},
+ {0x4968, 0x00000000},
+ {0x496C, 0x00000000},
+ {0x4970, 0x00000000},
+ {0x4974, 0x00000000},
+ {0x4978, 0x00000000},
+ {0x497C, 0x00000000},
+ {0x4980, 0x40000000},
+ {0x4984, 0x00000000},
+ {0x4988, 0x00000000},
+ {0x498C, 0x00000000},
+ {0x4990, 0x00000000},
+ {0x4994, 0x04065800},
+ {0x4998, 0x02004080},
+ {0x499C, 0x0E1E3E05},
+ {0x49A0, 0x0A163068},
+ {0x49A4, 0x00206040},
+ {0x49A8, 0x02020202},
+ {0x49AC, 0x00002020},
+ {0x49B0, 0xF8F8F418},
+ {0x49B4, 0xF8E8F8F8},
+ {0x49B8, 0xF80808E8},
+ {0x4A00, 0xF8F8FA00},
+ {0x4A04, 0xFAFAFAF8},
+ {0x4A08, 0xFAFAFAFA},
+ {0x4A28, 0xFAFAFAFA},
+ {0x4A2C, 0xFAFAFAFA},
+ {0x4A30, 0xFAFAFAFA},
+ {0x4A34, 0xFAFAFAFA},
+ {0x4A38, 0xFAFAFAFA},
+ {0x4A3C, 0xFAFAFAFA},
+ {0x4A40, 0xFAFAFAFA},
+ {0x4A44, 0x0000FAFA},
+ {0x49BC, 0x00000000},
+ {0x49C0, 0x800CD62D},
+ {0x49C4, 0x00000103},
+ {0x49C8, 0x00000000},
+ {0x49CC, 0x00000000},
+ {0x49D0, 0x00000000},
+ {0x49D4, 0x00000000},
+ {0x49D8, 0x00000000},
+ {0x49DC, 0x00000000},
+ {0x49E0, 0x00000000},
+ {0x49E4, 0x00000000},
+ {0x49E8, 0x00000000},
+ {0x49EC, 0x00000000},
+ {0x994, 0x00000010},
+ {0x904, 0x00000005},
+ {0xC3C, 0x2840E1BF},
+ {0xC40, 0x00000000},
+ {0xC44, 0x00000007},
+ {0xC48, 0x410E4000},
+ {0xC54, 0x1EE14368},
+ {0xC58, 0x41000000},
+ {0x730, 0x00000002},
+ {0xC60, 0x017FFFF2},
+ {0xC64, 0x0010A130},
+ {0xC68, 0x10000050},
+ {0xC6C, 0x10001021},
+ {0x708, 0x00000000},
+ {0x884, 0x0043F01D},
+ {0x704, 0x601E0100},
+ {0x710, 0xEF810000},
+ {0x704, 0x601E0100},
+ {0xD40, 0xF64FA0F7},
+ {0xD44, 0x0400063F},
+ {0xD48, 0x0003FF7F},
+ {0xD4C, 0x00000000},
+ {0xD50, 0xF64FA0F7},
+ {0xD54, 0x04100437},
+ {0xD58, 0x0000FF7F},
+ {0xD5C, 0x00000000},
+ {0xD60, 0x00000000},
+ {0xD64, 0x00000000},
+ {0xD70, 0x00000015},
+ {0xD90, 0x000003FF},
+ {0xD94, 0x00000000},
+ {0xD98, 0x0000003F},
+ {0xD9C, 0x00000000},
+ {0xDA0, 0x000003FE},
+ {0xDA4, 0x00000000},
+ {0xDA8, 0x0000003F},
+ {0xDAC, 0x00000000},
+ {0xD00, 0x77777777},
+ {0xD04, 0xBBBBBBBB},
+ {0xD08, 0xBBBBBBBB},
+ {0xD0C, 0x00000070},
+ {0xD10, 0x20110900},
+ {0xD10, 0x20110FFF},
+ {0xD78, 0x00000001},
+ {0xD7C, 0x001D050E},
+ {0xD84, 0x00004207},
+ {0xD18, 0x50209900},
+ {0xD80, 0x00804100},
+ {0x718, 0x1333233F},
+ {0x604, 0x041E1E1E},
+ {0x714, 0x00010000},
+ {0x586C, 0x000000F0},
+ {0x586C, 0x000000E0},
+ {0x586C, 0x000000D0},
+ {0x586C, 0x000000C0},
+ {0x586C, 0x000000B0},
+ {0x586C, 0x000000A0},
+ {0x586C, 0x00000090},
+ {0x586C, 0x00000080},
+ {0x586C, 0x00000070},
+ {0x586C, 0x00000060},
+ {0x586C, 0x00000050},
+ {0x586C, 0x00000040},
+ {0x586C, 0x00000030},
+ {0x586C, 0x00000020},
+ {0x586C, 0x00000010},
+ {0x586C, 0x00000000},
+ {0x786C, 0x000000F0},
+ {0x786C, 0x000000E0},
+ {0x786C, 0x000000D0},
+ {0x786C, 0x000000C0},
+ {0x786C, 0x000000B0},
+ {0x786C, 0x000000A0},
+ {0x786C, 0x00000090},
+ {0x786C, 0x00000080},
+ {0x786C, 0x00000070},
+ {0x786C, 0x00000060},
+ {0x786C, 0x00000050},
+ {0x786C, 0x00000040},
+ {0x786C, 0x00000030},
+ {0x786C, 0x00000020},
+ {0x786C, 0x00000010},
+ {0x786C, 0x00000000},
+ {0xC0D4, 0x4486888C},
+ {0xC0D8, 0xC6BA10E1},
+ {0xC0DC, 0x30C52868},
+ {0xC0E0, 0x05008128},
+ {0xC0E4, 0x0000A72B},
+ {0xC1D4, 0x4486888C},
+ {0xC1D8, 0xC6BA10E1},
+ {0xC1DC, 0x30C52868},
+ {0xC1E0, 0x05008128},
+ {0xC1E4, 0x0000A72B},
+ {0xC0EC, 0x00000000},
+ {0xC0E4, 0x0000272B},
+ {0xC1EC, 0x00000000},
+ {0xC1E4, 0x0000272B},
+ {0x334, 0xFFFFFFFF},
+ {0x33C, 0x55000000},
+ {0x340, 0x00005555},
+ {0x724, 0x00111200},
+ {0x5868, 0xA9550000},
+ {0x5870, 0x33221100},
+ {0x5874, 0x77665544},
+ {0x5878, 0xBBAA9988},
+ {0x587C, 0xFFEEDDCC},
+ {0x5880, 0x76543210},
+ {0x5884, 0xFEDCBA98},
+ {0x5888, 0x00000000},
+ {0x588C, 0x00000000},
+ {0x5894, 0x00000008},
+ {0x7868, 0xA9550000},
+ {0x7870, 0x33221100},
+ {0x7874, 0x77665544},
+ {0x7878, 0xBBAA9988},
+ {0x787C, 0xFFEEDDCC},
+ {0x7880, 0x76543210},
+ {0x7884, 0xFEDCBA98},
+ {0x7888, 0x00000000},
+ {0x788C, 0x00000000},
+ {0x7894, 0x00000008},
+ {0x650, 0x00200888},
+ {0x710, 0xF3810000},
+ {0x020, 0x0000F381},
+ {0x024, 0x0000F381},
+ {0x000, 0xC580801E},
+ {0xC70, 0x00000400},
+ {0x980, 0x10002250},
+ {0x988, 0x3C3C4107},
+ {0x994, 0x00000010},
+ {0x2994, 0x00000010},
+ {0x000, 0x0580801F},
+ {0x240C, 0x00000000},
+ {0x640, 0x140A141E},
+ {0x640, 0x1414141E},
+ {0x640, 0x1414141E},
+ {0x644, 0x3414283C},
+ {0x644, 0x3425283C},
+ {0x644, 0x3426283C},
+ {0x2640, 0x140A141E},
+ {0x2640, 0x1414141E},
+ {0x2640, 0x1414141E},
+ {0x2644, 0x3414283C},
+ {0x2644, 0x3425283C},
+ {0x2644, 0x3425183C},
+ {0x2300, 0x02748790},
+ {0x2304, 0x00558670},
+ {0x2308, 0x002883F0},
+ {0x230C, 0x00090120},
+ {0x2310, 0x00000000},
+ {0x2314, 0x06000000},
+ {0x2318, 0x00000000},
+ {0x231C, 0x00000000},
+ {0x2320, 0x03020100},
+ {0x2324, 0x07060504},
+ {0x2328, 0x0B0A0908},
+ {0x232C, 0x0F0E0D0C},
+ {0x2330, 0x13121110},
+ {0x2334, 0x17161514},
+ {0x2338, 0x0C700022},
+ {0x233C, 0x0A0529D0},
+ {0x2340, 0x000529D0},
+ {0x2344, 0x0006318A},
+ {0x2348, 0xB7E6318A},
+ {0x234C, 0x80039C00},
+ {0x2350, 0x80039C00},
+ {0x2354, 0x0005298F},
+ {0x2358, 0x0015296E},
+ {0x235C, 0x0C07FC31},
+ {0x2360, 0x0219AAAE},
+ {0x2364, 0xE4F624C3},
+ {0x2368, 0x53626F15},
+ {0x236C, 0x48000000},
+ {0x2370, 0x48000000},
+ {0x2374, 0x07540000},
+ {0x2378, 0x202401B9},
+ {0x237C, 0x00F7000E},
+ {0x2380, 0x0F0A1111},
+ {0x2384, 0x30D9000F},
+ {0x2388, 0x0200EA02},
+ {0x238C, 0x003CB061},
+ {0x2390, 0x69C00000},
+ {0x2394, 0x00000000},
+ {0x2398, 0x000000F0},
+ {0x239C, 0x0001FFFF},
+ {0x23A0, 0x00C80064},
+ {0x23A4, 0x0190012C},
+ {0x23A8, 0x001917BE},
+ {0x23AC, 0x0B30880C},
+ {0x23B0, 0x9281CE00},
+ {0x23B4, 0x7F027C00},
+ {0x704, 0x601E0102},
+ {0x704, 0x601E0102},
+ {0x5864, 0x080801FF},
+ {0x7864, 0x080801FF},
+ {0xC60, 0x017FFFF3},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x8088, 0x007F0000},
+ {0x81A4, 0x003F3A00},
+ {0x81B4, 0x0100007F},
+ {0x81C0, 0x0060010B},
+ {0x81A0, 0x00000010},
+ {0x8138, 0x00000002},
+ {0x82A4, 0x003F3A00},
+ {0x82B4, 0x0100007F},
+ {0x82C0, 0x0060010B},
+ {0x82A0, 0x00000010},
+ {0x81A0, 0x00000010},
+ {0x8238, 0x00000002},
+ {0x8088, 0x00000000},
+ {0x8020, 0x00000000},
+ {0x8120, 0x00000000},
+ {0x8220, 0x00000000},
+ {0x8124, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0xC60, 0x017FFFF3},
+ {0xC70, 0x00000600},
+ {0xC70, 0x00000660},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x8120, 0x10000000},
+ {0x8120, 0x10030000},
+ {0x8124, 0x00000F0F},
+ {0x8124, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x8220, 0x10000000},
+ {0x8220, 0x10030000},
+ {0x704, 0x601E0100},
+ {0x5864, 0x100801FF},
+ {0x7864, 0x100801FF},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0x58D4, 0x7401FE00},
+ {0x78D4, 0x7401FE00},
+ {0x58F0, 0x400401FF},
+ {0x78F0, 0x400401FF},
+ {0x58F0, 0x400401FF},
+ {0x78F0, 0x400401FF},
+ {0x704, 0x601E0102},
+ {0xC7C, 0x0020BFE0},
+ {0x58C0, 0x00FE0000},
+ {0x58FC, 0x00000000},
+ {0x566C, 0x00000005},
+ {0x566C, 0x00001005},
+ {0x78C0, 0x00FE0000},
+ {0x78FC, 0x00000000},
+ {0x700, 0x00000030},
+ {0x704, 0x601E0102},
+ {0x704, 0x601E0100},
+ {0x704, 0x601E0502},
+ {0x20FC, 0x00000000},
+ {0x20F8, 0x00000000},
+ {0x20F0, 0x00000000},
+ {0x9C0, 0x00000001},
+ {0x9C0, 0x00000000},
+ {0x9C0, 0x00000001},
+ {0x9C0, 0x00000000},
+ {0x4AE8, 0x00000744},
+ {0x4AF0, 0x00000744},
+ {0x1010, 0x00000010},
+ {0x3010, 0x00000010},
+ {0x4AD4, 0x00000040},
+ {0x4AE0, 0x00000040},
+ {0x4AE4, 0x0079E99E},
+ {0x4AEC, 0x0079E99E},
+ {0x300, 0xF30CE31C},
+ {0x304, 0x13EF1F19},
+ {0x308, 0x0C0CF3F3},
+ {0x30C, 0x0C0C0C0C},
+ {0x310, 0x80496000},
+ {0x314, 0x0041E000},
+ {0x318, 0x20022042},
+ {0x31C, 0x20448009},
+ {0x320, 0x00010031},
+ {0x324, 0xE000E000},
+ {0x328, 0xE000E000},
+ {0x32C, 0xE000E000},
+ {0x12BC, 0x10104041},
+ {0x12C0, 0x14411111},
+ {0x32BC, 0x10104041},
+ {0x32C0, 0x14411111},
+ {0x010, 0x0005FFFF},
+ {0x028, 0x0000F381},
+ {0x02C, 0x0000F381},
+ {0x620, 0x00141230},
+ {0x704, 0x601C05FF},
+ {0x720, 0x20000000},
+ {0x738, 0x004100CC},
+ {0x12A0, 0x24903056},
+ {0x12AC, 0x12333121},
+ {0x12B8, 0x30020000},
+ {0x12E4, 0x30D52A68},
+ {0x2000, 0x50BBBF04},
+ {0x32A0, 0x24903056},
+ {0x32AC, 0x12333121},
+ {0x32B8, 0x30020000},
+ {0x32E4, 0x30D52A68},
+ {0x5800, 0x03FF807F},
+ {0x5804, 0x04237040},
+ {0x5808, 0x04237040},
+ {0x7800, 0x03FF807F},
+ {0x7804, 0x04237040},
+ {0x7808, 0x04237040},
+ {0x73C, 0x00000002},
+ {0x74C, 0x00000001},
+ {0x748, 0x00000002},
+ {0x5818, 0x082C1800},
+ {0x7818, 0x082C1800},
+ {0x624, 0x0101030A},
+ {0xC14, 0x85010000},
+ {0xDD4, 0x00000001},
+ {0x241C, 0x00000001},
+ {0x1200, 0x00010142},
+ {0x3200, 0x00010142},
+ {0xC0F8, 0x00000001},
+ {0xC1F8, 0x00000001},
+ {0x35C, 0x000004C4},
+ {0x0F0, 0x00000002},
+ {0x0F4, 0x00000028},
+ {0x0F8, 0x20220408},
+};
+
+static const struct rtw89_reg2_def rtw89_8852b_phy_bb_reg_gain[] = {
+ {0x000, 0x18FBDDB7},
+ {0x001, 0x006F5436},
+ {0x002, 0x00004F31},
+ {0x100, 0x1BFEE0B7},
+ {0x101, 0x006C5238},
+ {0x102, 0x00005031},
+ {0x10000, 0x07E6C39E},
+ {0x10001, 0x00654526},
+ {0x10002, 0x00006750},
+ {0x10100, 0x09E9C69F},
+ {0x10101, 0x00674627},
+ {0x10102, 0x00006750},
+ {0x20000, 0x06E8C49F},
+ {0x20001, 0x00654526},
+ {0x20002, 0x00006750},
+ {0x20100, 0x07E9C6A0},
+ {0x20101, 0x00674728},
+ {0x20102, 0x00006850},
+ {0x30000, 0x04E5C39D},
+ {0x30001, 0x00634325},
+ {0x30002, 0x00006750},
+ {0x30100, 0x06E9C69F},
+ {0x30101, 0x00654527},
+ {0x30102, 0x00006750},
+ {0x1000000, 0x000000F4},
+ {0x1000010, 0x000000F8},
+ {0x1000011, 0x0000F8F8},
+ {0x1000100, 0x000000F8},
+ {0x1000110, 0x00000000},
+ {0x1000111, 0x00000000},
+ {0x1010000, 0x000000F4},
+ {0x1010010, 0x000000F8},
+ {0x1010011, 0x0000F8F8},
+ {0x1010020, 0x000000F8},
+ {0x1010021, 0x0808E8E8},
+ {0x1010029, 0x0000F8F8},
+ {0x1010100, 0x000000F4},
+ {0x1010110, 0x000000F8},
+ {0x1010111, 0x0000F8F8},
+ {0x1010120, 0x000000F8},
+ {0x1010121, 0x0808E8E8},
+ {0x1010129, 0x0000F8F8},
+ {0x1020000, 0x000000F4},
+ {0x1020010, 0x000000F8},
+ {0x1020011, 0x0000F8F8},
+ {0x1020020, 0x000000F8},
+ {0x1020021, 0x0808E8E8},
+ {0x1020029, 0x0000F8F8},
+ {0x1020100, 0x000000F4},
+ {0x1020110, 0x000000F8},
+ {0x1020111, 0x0000F8F8},
+ {0x1020120, 0x000000F8},
+ {0x1020121, 0x0808E8E8},
+ {0x1020129, 0x0000F8F8},
+ {0x1030000, 0x000000F4},
+ {0x1030010, 0x000000F8},
+ {0x1030011, 0x0000F8F8},
+ {0x1030020, 0x000000F8},
+ {0x1030021, 0x0808E8E8},
+ {0x1030029, 0x0000F8F8},
+ {0x1030100, 0x000000F4},
+ {0x1030110, 0x000000F8},
+ {0x1030111, 0x0000F8F8},
+ {0x1030120, 0x000000F8},
+ {0x1030121, 0x0808E8E8},
+ {0x1030129, 0x0000F8F8},
+};
+
+static const struct rtw89_reg2_def rtw89_8852b_phy_radioa_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0020000, 0x00000001},
+ {0xF0010001, 0x00000002},
+ {0xF0020001, 0x00000003},
+ {0xF0030001, 0x00000004},
+ {0xF0040001, 0x00000005},
+ {0xF0050001, 0x00000006},
+ {0xF0060001, 0x00000007},
+ {0xF0070001, 0x00000008},
+ {0xF0080001, 0x00000009},
+ {0xF0290001, 0x0000000A},
+ {0xF02B0001, 0x0000000B},
+ {0x005, 0x00000000},
+ {0x000, 0x00030000},
+ {0x10000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x10018, 0x00011124},
+ {0x000, 0x00033C00},
+ {0x10000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x011, 0x00014073},
+ {0x067, 0x00000070},
+ {0x059, 0x000A0000},
+ {0x066, 0x00000100},
+ {0x057, 0x0000D589},
+ {0x05A, 0x0007FFFF},
+ {0x0A4, 0x0006FF12},
+ {0x043, 0x00005000},
+ {0x0E1, 0x00000001},
+ {0x0DD, 0x000001A0},
+ {0x0CA, 0x00002000},
+ {0x0D3, 0x00000003},
+ {0x0B3, 0x0004EFE0},
+ {0x0B4, 0x0007C07E},
+ {0x0B5, 0x0003A701},
+ {0x0B6, 0x000581E0},
+ {0x0B7, 0x00001A0A},
+ {0x0BB, 0x000C7000},
+ {0x0ED, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000543},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000542},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000541},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000521},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000343},
+ {0x033, 0x00000005},
+ {0x03F, 0x00000342},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000341},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000321},
+ {0x033, 0x00000008},
+ {0x03F, 0x000005C3},
+ {0x033, 0x00000009},
+ {0x03F, 0x000005C2},
+ {0x033, 0x0000000A},
+ {0x03F, 0x000005C1},
+ {0x033, 0x0000000B},
+ {0x03F, 0x000005A1},
+ {0x033, 0x0000000C},
+ {0x03F, 0x000002C3},
+ {0x033, 0x0000000D},
+ {0x03F, 0x000002C2},
+ {0x033, 0x0000000E},
+ {0x03F, 0x000002C1},
+ {0x033, 0x0000000F},
+ {0x03F, 0x000002A1},
+ {0x0ED, 0x00000000},
+ {0x0ED, 0x00002000},
+ {0x033, 0x00000002},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000006},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x0ED, 0x00000000},
+ {0x018, 0x00001001},
+ {0x10018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x10002, 0x0000000D},
+ {0x0EE, 0x00000004},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x0EE, 0x00000000},
+ {0x08F, 0x000D0F7A},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000008},
+ {0x03E, 0x000000C4},
+ {0x03F, 0x000034C0},
+ {0x033, 0x0000000A},
+ {0x03E, 0x000000C4},
+ {0x03F, 0x000035D0},
+ {0x033, 0x0000000B},
+ {0x03E, 0x000000C4},
+ {0x03F, 0x000035C8},
+ {0x033, 0x0000008A},
+ {0x03E, 0x000000C4},
+ {0x03F, 0x000035F7},
+ {0x0EF, 0x00000000},
+ {0x08D, 0x000CC800},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000700},
+ {0x033, 0x00000005},
+ {0x03F, 0x00090600},
+ {0x033, 0x00000004},
+ {0x03F, 0x000A3500},
+ {0x033, 0x00000003},
+ {0x03F, 0x000A3400},
+ {0x033, 0x00000002},
+ {0x03F, 0x00008B00},
+ {0x033, 0x00000001},
+ {0x03F, 0x00001B00},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003A00},
+ {0x033, 0x0000000F},
+ {0x03F, 0x00000700},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000700},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00090600},
+ {0x033, 0x0000000C},
+ {0x03F, 0x000A3500},
+ {0x033, 0x0000000B},
+ {0x03F, 0x000A3400},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00008B00},
+ {0x033, 0x00000009},
+ {0x03F, 0x00001B00},
+ {0x033, 0x00000008},
+ {0x03F, 0x00003A00},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000010},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000001},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000017},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000000},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000001},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000002},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000003},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000004},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000005},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000006},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000008},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000009},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x0000000C},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x0000000D},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x0000000E},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000010},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000011},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000012},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000013},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000014},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000020},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000021},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000022},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000023},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000024},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000025},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000026},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000028},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000029},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000002A},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000002B},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x0000002C},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x0000002D},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x0000002E},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000030},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000031},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000032},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000033},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000034},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000035},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000036},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x03F, 0x00004376},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x03F, 0x00004376},
+ {0x033, 0x00000007},
+ {0x03F, 0x00004376},
+ {0x033, 0x00000008},
+ {0x03F, 0x00004376},
+ {0x033, 0x00000009},
+ {0x03F, 0x00004376},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004386},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004396},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x067, 0x00008072},
+ {0x0EF, 0x00000010},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000ED5},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000FC7},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000783},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000973},
+ {0x033, 0x00000005},
+ {0x03F, 0x00000762},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000762},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023958},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026858},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023A58},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002C758},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000800},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000005},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000005},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000006},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x0EE, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003000},
+ {0x033, 0x00000001},
+ {0x03F, 0x00003001},
+ {0x033, 0x00000002},
+ {0x03F, 0x00003003},
+ {0x033, 0x00000003},
+ {0x03F, 0x00003007},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000300F},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000310F},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000330F},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000330F},
+ {0x033, 0x00000008},
+ {0x03F, 0x00003000},
+ {0x033, 0x00000009},
+ {0x03F, 0x00003001},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00003003},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003103},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00002307},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00000000},
+ {0x0EE, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000005},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000100},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000100},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0EC, 0x00000100},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03D, 0x00000078},
+ {0x03E, 0x00080000},
+ {0x03F, 0x00000000},
+ {0x033, 0x00000005},
+ {0x03D, 0x0000007B},
+ {0x03E, 0x00020000},
+ {0x03F, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x033, 0x00000000},
+ {0x008, 0x00060280},
+ {0x009, 0x00030400},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001F7},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001F7},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001F7},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000017F},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000017F},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000007F},
+ {0x0EF, 0x00000000},
+ {0x06E, 0x00077A18},
+ {0x06F, 0x00077A18},
+ {0x06D, 0x00000C31},
+ {0x0EF, 0x00020000},
+ {0x033, 0x00000000},
+ {0x03F, 0x000005FF},
+ {0x0EF, 0x00000000},
+ {0x005, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0xA0000000, 0x00000000},
+ {0x094, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10033, 0x00000080},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000081},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000082},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F0},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000083},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000ED},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000EA},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000E7},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000088},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000089},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000026},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000023},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001A},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000017},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000090},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000014},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A2},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F0},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A3},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000ED},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000EA},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000E7},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000026},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000023},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001A},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000017},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000B0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000014},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C2},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F0},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C3},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000ED},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000EA},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000E7},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000026},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000023},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001A},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000017},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000D0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000014},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00004000},
+ {0x10033, 0x00000080},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x00000081},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x00000082},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x00000083},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000191},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000018B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000014D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000010B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000088},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000089},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000093},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000053},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000090},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000091},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A0},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x000000A1},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x000000A2},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x000000A3},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x000000A4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000191},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000018B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000014D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000010B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000093},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000053},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000B0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000B1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C0},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x000000C1},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x000000C2},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x000000C3},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x000000C4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000191},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000018B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000014D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000010B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000093},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000053},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000D0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000D1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10033, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0x10033, 0x00000001},
+ {0x1003F, 0x000000F3},
+ {0x10033, 0x00000002},
+ {0x1003F, 0x000000F0},
+ {0x10033, 0x00000003},
+ {0x1003F, 0x000000ED},
+ {0x10033, 0x00000004},
+ {0x1003F, 0x000000EA},
+ {0x10033, 0x00000005},
+ {0x1003F, 0x000000E7},
+ {0x10033, 0x00000006},
+ {0x1003F, 0x000000A6},
+ {0x10033, 0x00000007},
+ {0x1003F, 0x000000A3},
+ {0x10033, 0x00000008},
+ {0x1003F, 0x00000063},
+ {0x10033, 0x00000009},
+ {0x1003F, 0x00000060},
+ {0x10033, 0x0000000A},
+ {0x1003F, 0x00000023},
+ {0x10033, 0x0000000B},
+ {0x1003F, 0x00000020},
+ {0x10033, 0x0000000C},
+ {0x1003F, 0x0000001D},
+ {0x10033, 0x0000000D},
+ {0x1003F, 0x0000001A},
+ {0x10033, 0x0000000E},
+ {0x1003F, 0x00000017},
+ {0x10033, 0x0000000F},
+ {0x1003F, 0x00000014},
+ {0x10033, 0x00000010},
+ {0x1003F, 0x00000011},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00004000},
+ {0x10033, 0x00000000},
+ {0x1003F, 0x000001AF},
+ {0x10033, 0x00000001},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x00000002},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x00000003},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x00000004},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x00000005},
+ {0x1003F, 0x0000015F},
+ {0x10033, 0x00000006},
+ {0x1003F, 0x00000159},
+ {0x10033, 0x00000007},
+ {0x1003F, 0x0000011F},
+ {0x10033, 0x00000008},
+ {0x1003F, 0x00000119},
+ {0x10033, 0x00000009},
+ {0x1003F, 0x000000DF},
+ {0x10033, 0x0000000A},
+ {0x1003F, 0x000000D9},
+ {0x10033, 0x0000000B},
+ {0x1003F, 0x0000009F},
+ {0x10033, 0x0000000C},
+ {0x1003F, 0x00000099},
+ {0x10033, 0x0000000D},
+ {0x1003F, 0x0000005F},
+ {0x10033, 0x0000000E},
+ {0x1003F, 0x00000059},
+ {0x10033, 0x0000000F},
+ {0x1003F, 0x0000001F},
+ {0x10033, 0x00000010},
+ {0x1003F, 0x00000019},
+ {0x10033, 0x00000011},
+ {0x1003F, 0x00000013},
+ {0x100EE, 0x00000000},
+ {0x10005, 0x00000001},
+ {0x09F, 0x00000032},
+};
+
+static const struct rtw89_reg2_def rtw89_8852b_phy_radiob_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0020000, 0x00000001},
+ {0xF0010001, 0x00000002},
+ {0xF0020001, 0x00000003},
+ {0xF0030001, 0x00000004},
+ {0xF0040001, 0x00000005},
+ {0xF0050001, 0x00000006},
+ {0xF0060001, 0x00000007},
+ {0xF0070001, 0x00000008},
+ {0xF0080001, 0x00000009},
+ {0xF0290001, 0x0000000A},
+ {0xF02B0001, 0x0000000B},
+ {0x005, 0x00000000},
+ {0x000, 0x00030000},
+ {0x10000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x10018, 0x00011124},
+ {0x000, 0x00033C00},
+ {0x10000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x011, 0x00014073},
+ {0x067, 0x00000070},
+ {0x059, 0x000A0000},
+ {0x066, 0x00000100},
+ {0x05A, 0x0007F000},
+ {0x0A4, 0x0006FF12},
+ {0x043, 0x00005000},
+ {0x0E1, 0x00000001},
+ {0x0DD, 0x000001A0},
+ {0x0CA, 0x00002000},
+ {0x0D3, 0x00000003},
+ {0x0B3, 0x0004EFE0},
+ {0x0B4, 0x0007C03E},
+ {0x0B5, 0x0003A201},
+ {0x0BB, 0x000C7000},
+ {0x0ED, 0x00002000},
+ {0x033, 0x00000002},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000006},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x0ED, 0x00000000},
+ {0x018, 0x00001001},
+ {0x10018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x10002, 0x0000000D},
+ {0x0EE, 0x00000004},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x0EE, 0x00000000},
+ {0x08F, 0x000D0F7A},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D30},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x000000C4},
+ {0x03F, 0x000034C0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D74},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x000000C4},
+ {0x03F, 0x000035D0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D72},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x000000C4},
+ {0x03F, 0x000035C8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000008A},
+ {0x03E, 0x00000031},
+ {0x03F, 0x00000D7D},
+ {0x0EF, 0x00000000},
+ {0x08D, 0x000CC800},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000700},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000700},
+ {0x033, 0x00000005},
+ {0x03F, 0x00090600},
+ {0x033, 0x00000004},
+ {0x03F, 0x000A3500},
+ {0x033, 0x00000003},
+ {0x03F, 0x000A3400},
+ {0x033, 0x00000002},
+ {0x03F, 0x00008B00},
+ {0x033, 0x00000001},
+ {0x03F, 0x00001B00},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003A00},
+ {0x033, 0x0000000F},
+ {0x03F, 0x00000700},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000700},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00090600},
+ {0x033, 0x0000000C},
+ {0x03F, 0x000A3500},
+ {0x033, 0x0000000B},
+ {0x03F, 0x000A3400},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00008B00},
+ {0x033, 0x00000009},
+ {0x03F, 0x00001B00},
+ {0x033, 0x00000008},
+ {0x03F, 0x00003A00},
+ {0x033, 0x00000017},
+ {0x03F, 0x00000705},
+ {0x033, 0x00000016},
+ {0x03F, 0x00000705},
+ {0x033, 0x00000015},
+ {0x03F, 0x00090605},
+ {0x033, 0x00000014},
+ {0x03F, 0x000A3505},
+ {0x033, 0x00000013},
+ {0x03F, 0x000A3405},
+ {0x033, 0x00000012},
+ {0x03F, 0x00008B05},
+ {0x033, 0x00000011},
+ {0x03F, 0x00001B05},
+ {0x033, 0x00000010},
+ {0x03F, 0x00003A05},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000010},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000001},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000015},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000005},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000000},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000001},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000002},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000003},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000004},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000005},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000006},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000008},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000009},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x0000000C},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x0000000D},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x0000000E},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000010},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000011},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000012},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000013},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000014},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000020},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000021},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000022},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000023},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000024},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000025},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000026},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000028},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000029},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000002A},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x0000002B},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x0000002C},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x0000002D},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x0000002E},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x033, 0x00000030},
+ {0x03E, 0x00004FC0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000031},
+ {0x03E, 0x000046C0},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000032},
+ {0x03E, 0x00004240},
+ {0x03F, 0x00000087},
+ {0x033, 0x00000033},
+ {0x03E, 0x00008010},
+ {0x03F, 0x00000147},
+ {0x033, 0x00000034},
+ {0x03E, 0x0000A048},
+ {0x03F, 0x0000004F},
+ {0x033, 0x00000035},
+ {0x03E, 0x0000A030},
+ {0x03F, 0x0000005F},
+ {0x033, 0x00000036},
+ {0x03E, 0x0000A000},
+ {0x03F, 0x0000009F},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x033, 0x00000001},
+ {0x03F, 0x00004346},
+ {0x033, 0x00000002},
+ {0x03F, 0x00004346},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03F, 0x00004346},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004317},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004376},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000043A6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004347},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004346},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00004366},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x03F, 0x00004386},
+ {0x033, 0x00000024},
+ {0x03F, 0x00004386},
+ {0x033, 0x00000025},
+ {0x03F, 0x00004386},
+ {0x033, 0x00000026},
+ {0x03F, 0x00004386},
+ {0x033, 0x00000027},
+ {0x03F, 0x00004386},
+ {0x0EF, 0x00000000},
+ {0x067, 0x00008072},
+ {0x0EF, 0x00000010},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000ED5},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000FC5},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000A93},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000973},
+ {0x033, 0x00000005},
+ {0x03F, 0x00000761},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000761},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022658},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00026458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00022858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x03E, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00020858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00027558},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000800},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000005},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000005},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000006},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x0EE, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003000},
+ {0x033, 0x00000001},
+ {0x03F, 0x00003001},
+ {0x033, 0x00000002},
+ {0x03F, 0x00003003},
+ {0x033, 0x00000003},
+ {0x03F, 0x00003007},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000300F},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000310F},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000330F},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000330F},
+ {0x033, 0x00000008},
+ {0x03F, 0x00003000},
+ {0x033, 0x00000009},
+ {0x03F, 0x00003001},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00003003},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003007},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003103},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003107},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003307},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00002307},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001307},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000307},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00000000},
+ {0x0EE, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000005},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000100},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000100},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0EC, 0x00000100},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03D, 0x00000078},
+ {0x03E, 0x00080000},
+ {0x03F, 0x00000000},
+ {0x033, 0x00000005},
+ {0x03D, 0x0000007B},
+ {0x03E, 0x00020000},
+ {0x03F, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x033, 0x00000000},
+ {0x008, 0x00060280},
+ {0x009, 0x00030400},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001F7},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001F7},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000013F},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FB},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001F7},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000000FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000017F},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000017F},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000007F},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000007F},
+ {0x0EF, 0x00000000},
+ {0x06E, 0x00077A18},
+ {0x06F, 0x00077A18},
+ {0x06D, 0x00000C31},
+ {0x0EF, 0x00020000},
+ {0x033, 0x00000000},
+ {0x03F, 0x000005FF},
+ {0x0EF, 0x00000000},
+ {0x005, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x094, 0x000000FC},
+ {0xA0000000, 0x00000000},
+ {0x094, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10033, 0x00000080},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000081},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000082},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F0},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000083},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000ED},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000EA},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000E7},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000088},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000089},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000026},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000023},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001A},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000017},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000090},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000014},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A2},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F0},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A3},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000ED},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000EA},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000E7},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000026},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000023},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001A},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000017},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000B0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000014},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000FB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C2},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F5},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000F0},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C3},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000F2},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000ED},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000EA},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000EC},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000E7},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000AB},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A6},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000A8},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000A3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000068},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000065},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000002B},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000026},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000028},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000023},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000025},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000022},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000001A},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000017},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000D0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000014},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00004000},
+ {0x10033, 0x00000080},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x00000081},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x00000082},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x00000083},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000191},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000018B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000014D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000010B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000088},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000089},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000093},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000053},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x0000008F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000090},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x00000091},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A0},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x000000A1},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x000000A2},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x000000A3},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x000000A4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000191},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000018B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000014D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000010B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000A9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000093},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000053},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000B0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000B1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C0},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x000000C1},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x000000C2},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x000000C3},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x000000C4},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000158},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000191},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C5},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000011F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000018B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000119},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000014D},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000010B},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C8},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000C9},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000009F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D9},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CA},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x000000D3},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CB},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000005F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000099},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000093},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CD},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000001F},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000053},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000CF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000D0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x00000013},
+ {0xB0000000, 0x00000000},
+ {0x10033, 0x000000D1},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90060001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90080001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x90290001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0x902b0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x1003F, 0x00000007},
+ {0xA0000000, 0x00000000},
+ {0x1003F, 0x0000000D},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10033, 0x00000000},
+ {0x1003F, 0x000000F6},
+ {0x10033, 0x00000001},
+ {0x1003F, 0x000000F3},
+ {0x10033, 0x00000002},
+ {0x1003F, 0x000000F0},
+ {0x10033, 0x00000003},
+ {0x1003F, 0x000000ED},
+ {0x10033, 0x00000004},
+ {0x1003F, 0x000000EA},
+ {0x10033, 0x00000005},
+ {0x1003F, 0x000000E7},
+ {0x10033, 0x00000006},
+ {0x1003F, 0x000000A6},
+ {0x10033, 0x00000007},
+ {0x1003F, 0x000000A3},
+ {0x10033, 0x00000008},
+ {0x1003F, 0x00000063},
+ {0x10033, 0x00000009},
+ {0x1003F, 0x00000060},
+ {0x10033, 0x0000000A},
+ {0x1003F, 0x00000023},
+ {0x10033, 0x0000000B},
+ {0x1003F, 0x00000020},
+ {0x10033, 0x0000000C},
+ {0x1003F, 0x0000001D},
+ {0x10033, 0x0000000D},
+ {0x1003F, 0x0000001A},
+ {0x10033, 0x0000000E},
+ {0x1003F, 0x00000017},
+ {0x10033, 0x0000000F},
+ {0x1003F, 0x00000014},
+ {0x10033, 0x00000010},
+ {0x1003F, 0x00000011},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00004000},
+ {0x10033, 0x00000000},
+ {0x1003F, 0x000001AF},
+ {0x10033, 0x00000001},
+ {0x1003F, 0x000001A9},
+ {0x10033, 0x00000002},
+ {0x1003F, 0x000001A3},
+ {0x10033, 0x00000003},
+ {0x1003F, 0x0000019D},
+ {0x10033, 0x00000004},
+ {0x1003F, 0x00000197},
+ {0x10033, 0x00000005},
+ {0x1003F, 0x0000015F},
+ {0x10033, 0x00000006},
+ {0x1003F, 0x00000159},
+ {0x10033, 0x00000007},
+ {0x1003F, 0x0000011F},
+ {0x10033, 0x00000008},
+ {0x1003F, 0x00000119},
+ {0x10033, 0x00000009},
+ {0x1003F, 0x000000DF},
+ {0x10033, 0x0000000A},
+ {0x1003F, 0x000000D9},
+ {0x10033, 0x0000000B},
+ {0x1003F, 0x0000009F},
+ {0x10033, 0x0000000C},
+ {0x1003F, 0x00000099},
+ {0x10033, 0x0000000D},
+ {0x1003F, 0x0000005F},
+ {0x10033, 0x0000000E},
+ {0x1003F, 0x00000059},
+ {0x10033, 0x0000000F},
+ {0x1003F, 0x0000001F},
+ {0x10033, 0x00000010},
+ {0x1003F, 0x00000019},
+ {0x10033, 0x00000011},
+ {0x1003F, 0x00000013},
+ {0x100EE, 0x00000000},
+ {0x10005, 0x00000001},
+ {0x09F, 0x00000032},
+};
+
+static const struct rtw89_reg2_def rtw89_8852b_phy_nctl_regs[] = {
+ {0x8000, 0x00000008},
+ {0x8008, 0x00000000},
+ {0x8004, 0xf0862966},
+ {0x800c, 0x78000000},
+ {0x8010, 0x88015000},
+ {0x8014, 0x80010100},
+ {0x8018, 0x10010100},
+ {0x801c, 0xa210bc00},
+ {0x8020, 0x000403e0},
+ {0x8024, 0x00072160},
+ {0x8028, 0x00180e00},
+ {0x8030, 0x400000c0},
+ {0x8034, 0x11000830},
+ {0x8038, 0x00000009},
+ {0x803c, 0x00000008},
+ {0x8040, 0x00000046},
+ {0x8044, 0x0010001f},
+ {0x8048, 0xf0000003},
+ {0x804c, 0x62ac6162},
+ {0x8050, 0xf2acf162},
+ {0x8054, 0x62ac6162},
+ {0x8058, 0xf2acf162},
+ {0x805c, 0x150c0b02},
+ {0x8060, 0x150c0b02},
+ {0x8064, 0x2aa00047},
+ {0x8074, 0x80000000},
+ {0x807c, 0x000000ee},
+ {0x8088, 0x80000000},
+ {0x8098, 0x0000ff00},
+ {0x809c, 0x0000001f},
+ {0x80a0, 0x00010300},
+ {0x80b8, 0x00001000},
+ {0x80b0, 0x00000000},
+ {0x80d0, 0x00000000},
+ {0x80ec, 0x00000002},
+ {0x810c, 0x33112200},
+ {0x8110, 0x33112200},
+ {0x8114, 0x00000000},
+ {0x8120, 0x10010000},
+ {0x8124, 0x00000000},
+ {0x812c, 0x0000c000},
+ {0x8138, 0x40000000},
+ {0x813c, 0x40000000},
+ {0x8140, 0x00000000},
+ {0x8144, 0x0b040b03},
+ {0x8148, 0x0a050b04},
+ {0x814c, 0x0a050b04},
+ {0x8150, 0xe4e40000},
+ {0x8158, 0xffffffff},
+ {0x815c, 0xffffffff},
+ {0x8160, 0xffffffff},
+ {0x8164, 0xffffffff},
+ {0x8168, 0xffffffff},
+ {0x816c, 0x1fffffff},
+ {0x81a0, 0x00000000},
+ {0x81ac, 0x003f2e2e},
+ {0x81b0, 0x003f2e2e},
+ {0x81bc, 0x005b5b5b},
+ {0x81c0, 0x005b5b5b},
+ {0x81b4, 0x00600060},
+ {0x81b8, 0x00600060},
+ {0x81cc, 0x00000000},
+ {0x81dc, 0x00000002},
+ {0x81e0, 0x00000000},
+ {0x81e4, 0x00000001},
+ {0x820c, 0x33112200},
+ {0x8210, 0x33112200},
+ {0x8214, 0x00000000},
+ {0x8220, 0x10010000},
+ {0x8224, 0x00000000},
+ {0x822c, 0x0000d000},
+ {0x8238, 0x40000000},
+ {0x823c, 0x40000000},
+ {0x8240, 0x00000000},
+ {0x8244, 0x0b040b03},
+ {0x8248, 0x0a050b04},
+ {0x824c, 0x0a050b04},
+ {0x8250, 0xe4e40000},
+ {0x8258, 0xffffffff},
+ {0x825c, 0xffffffff},
+ {0x8260, 0xffffffff},
+ {0x8264, 0xffffffff},
+ {0x8268, 0xffffffff},
+ {0x826c, 0x1fffffff},
+ {0x82a0, 0x00000000},
+ {0x82ac, 0x003f2e2e},
+ {0x82b0, 0x003f2e2e},
+ {0x82bc, 0x005b5b5b},
+ {0x82c0, 0x005b5b5b},
+ {0x82b4, 0x00600060},
+ {0x82b8, 0x00600060},
+ {0x82cc, 0x00000000},
+ {0x82dc, 0x00000002},
+ {0x82e0, 0x00100000},
+ {0x82e4, 0x00000001},
+ {0x81d8, 0x00000001},
+ {0x82d8, 0x00000001},
+ {0x8d00, 0x00000000},
+ {0x8d04, 0x00000000},
+ {0x8d08, 0x00000000},
+ {0x8d0c, 0x00000000},
+ {0x8d10, 0x00000000},
+ {0x8d14, 0x00000000},
+ {0x8d18, 0x00000000},
+ {0x8d1c, 0x00000000},
+ {0x8d20, 0x00000000},
+ {0x8d24, 0x00000000},
+ {0x8d28, 0x00000000},
+ {0x8d2c, 0x00000000},
+ {0x8d30, 0x00000000},
+ {0x8d34, 0x00000000},
+ {0x8d38, 0x00000000},
+ {0x8d3c, 0x00000000},
+ {0x8d40, 0x00000000},
+ {0x8d44, 0x00000000},
+ {0x8d48, 0x00000000},
+ {0x8d4c, 0x00000000},
+ {0x8d50, 0x00000000},
+ {0x8d54, 0x00000000},
+ {0x8d58, 0x00000000},
+ {0x8d5c, 0x00000000},
+ {0x8d60, 0x00000000},
+ {0x8d64, 0x00000000},
+ {0x8d68, 0x00000000},
+ {0x8d6c, 0x00000000},
+ {0x8d70, 0x00000000},
+ {0x8d74, 0x00000000},
+ {0x8d78, 0x00000000},
+ {0x8d7c, 0x00000000},
+ {0x8d80, 0x00000000},
+ {0x8d84, 0x00000000},
+ {0x8d88, 0x00000000},
+ {0x8d8c, 0x00000000},
+ {0x8d90, 0x00000000},
+ {0x8d94, 0x00000000},
+ {0x8d98, 0x00000000},
+ {0x8d9c, 0x00000000},
+ {0x8da0, 0x00000000},
+ {0x8da4, 0x00000000},
+ {0x8da8, 0x00000000},
+ {0x8dac, 0x00000000},
+ {0x8db0, 0x00000000},
+ {0x8db4, 0x00000000},
+ {0x8db8, 0x00000000},
+ {0x8dbc, 0x00000000},
+ {0x8dc0, 0x00000000},
+ {0x8dc4, 0x00000000},
+ {0x8dc8, 0x00000000},
+ {0x8dcc, 0x00000000},
+ {0x8dd0, 0x00000000},
+ {0x8dd4, 0x00000000},
+ {0x8dd8, 0x00000000},
+ {0x8ddc, 0x00000000},
+ {0x8de0, 0x00000000},
+ {0x8de4, 0x00000000},
+ {0x8de8, 0x00000000},
+ {0x8dec, 0x00000000},
+ {0x8df0, 0x00000000},
+ {0x8df4, 0x00000000},
+ {0x8df8, 0x00000000},
+ {0x8dfc, 0x00000000},
+ {0x8e00, 0x00000000},
+ {0x8e04, 0x00000000},
+ {0x8e08, 0x00000000},
+ {0x8e0c, 0x00000000},
+ {0x8e10, 0x00000000},
+ {0x8e14, 0x00000000},
+ {0x8e18, 0x00000000},
+ {0x8e1c, 0x00000000},
+ {0x8e20, 0x00000000},
+ {0x8e24, 0x00000000},
+ {0x8e28, 0x00000000},
+ {0x8e2c, 0x00000000},
+ {0x8e30, 0x00000000},
+ {0x8e34, 0x00000000},
+ {0x8e38, 0x00000000},
+ {0x8e3c, 0x00000000},
+ {0x8e40, 0x00000000},
+ {0x8e44, 0x00000000},
+ {0x8e48, 0x00000000},
+ {0x8e4c, 0x00000000},
+ {0x8e50, 0x00000000},
+ {0x8e54, 0x00000000},
+ {0x8e58, 0x00000000},
+ {0x8e5c, 0x00000000},
+ {0x8e60, 0x00000000},
+ {0x8e64, 0x00000000},
+ {0x8e68, 0x00000000},
+ {0x8e6c, 0x00000000},
+ {0x8e70, 0x00000000},
+ {0x8e74, 0x00000000},
+ {0x8e78, 0x00000000},
+ {0x8e7c, 0x00000000},
+ {0x8e80, 0x00000000},
+ {0x8e84, 0x00000000},
+ {0x8e88, 0x00000000},
+ {0x8e8c, 0x00000000},
+ {0x8e90, 0x00000000},
+ {0x8e94, 0x00000000},
+ {0x8e98, 0x00000000},
+ {0x8e9c, 0x00000000},
+ {0x8ea0, 0x00000000},
+ {0x8ea4, 0x00000000},
+ {0x8ea8, 0x00000000},
+ {0x8eac, 0x00000000},
+ {0x8eb0, 0x00000000},
+ {0x8eb4, 0x00000000},
+ {0x8eb8, 0x00000000},
+ {0x8ebc, 0x00000000},
+ {0x8ec0, 0x00000000},
+ {0x8ec4, 0x00000000},
+ {0x8ec8, 0x00000000},
+ {0x8ecc, 0x00000000},
+ {0x8ed0, 0x00000000},
+ {0x8ed4, 0x00000000},
+ {0x8ed8, 0x00000000},
+ {0x8edc, 0x00000000},
+ {0x8ee0, 0x00000000},
+ {0x8ee4, 0x00000000},
+ {0x8ee8, 0x00000000},
+ {0x8eec, 0x00000000},
+ {0x8ef0, 0x00000000},
+ {0x8ef4, 0x00000000},
+ {0x8ef8, 0x00000000},
+ {0x8efc, 0x00000000},
+ {0x8f00, 0x00000000},
+ {0x8f04, 0x00000000},
+ {0x8f08, 0x00000000},
+ {0x8f0c, 0x00000000},
+ {0x8f10, 0x00000000},
+ {0x8f14, 0x00000000},
+ {0x8f18, 0x00000000},
+ {0x8f1c, 0x00000000},
+ {0x8f20, 0x00000000},
+ {0x8f24, 0x00000000},
+ {0x8f28, 0x00000000},
+ {0x8f2c, 0x00000000},
+ {0x8f30, 0x00000000},
+ {0x8f34, 0x00000000},
+ {0x8f38, 0x00000000},
+ {0x8f3c, 0x00000000},
+ {0x8f40, 0x00000000},
+ {0x8f44, 0x00000000},
+ {0x8f48, 0x00000000},
+ {0x8f4c, 0x00000000},
+ {0x8f50, 0x00000000},
+ {0x8f54, 0x00000000},
+ {0x8f58, 0x00000000},
+ {0x8f5c, 0x00000000},
+ {0x8f60, 0x00000000},
+ {0x8f64, 0x00000000},
+ {0x8f68, 0x00000000},
+ {0x8f6c, 0x00000000},
+ {0x8f70, 0x00000000},
+ {0x8f74, 0x00000000},
+ {0x8f78, 0x00000000},
+ {0x8f7c, 0x00000000},
+ {0x8f80, 0x00000000},
+ {0x8f84, 0x00000000},
+ {0x8f88, 0x00000000},
+ {0x8f8c, 0x00000000},
+ {0x8f90, 0x00000000},
+ {0x8f94, 0x00000000},
+ {0x8f98, 0x00000000},
+ {0x8f9c, 0x00000000},
+ {0x8fa0, 0x00000000},
+ {0x8fa4, 0x00000000},
+ {0x8fa8, 0x00000000},
+ {0x8fac, 0x00000000},
+ {0x8fb0, 0x00000000},
+ {0x8fb4, 0x00000000},
+ {0x8fb8, 0x00000000},
+ {0x8fbc, 0x00000000},
+ {0x8fc0, 0x00000000},
+ {0x8fc4, 0x00000000},
+ {0x8fc8, 0x00000000},
+ {0x8fcc, 0x00000000},
+ {0x8fd0, 0x00000000},
+ {0x8fd4, 0x00000000},
+ {0x8fd8, 0x00000000},
+ {0x8fdc, 0x00000000},
+ {0x8fe0, 0x00000000},
+ {0x8fe4, 0x00000000},
+ {0x8fe8, 0x00000000},
+ {0x8fec, 0x00000000},
+ {0x8ff0, 0x00000000},
+ {0x8ff4, 0x00000000},
+ {0x8ff8, 0x00000000},
+ {0x8ffc, 0x00000000},
+ {0x9000, 0x00000000},
+ {0x9004, 0x00000000},
+ {0x9008, 0x00000000},
+ {0x900c, 0x00000000},
+ {0x9010, 0x00000000},
+ {0x9014, 0x00000000},
+ {0x9018, 0x00000000},
+ {0x901c, 0x00000000},
+ {0x9020, 0x00000000},
+ {0x9024, 0x00000000},
+ {0x9028, 0x00000000},
+ {0x902c, 0x00000000},
+ {0x9030, 0x00000000},
+ {0x9034, 0x00000000},
+ {0x9038, 0x00000000},
+ {0x903c, 0x00000000},
+ {0x9040, 0x00000000},
+ {0x9044, 0x00000000},
+ {0x9048, 0x00000000},
+ {0x904c, 0x00000000},
+ {0x9050, 0x00000000},
+ {0x9054, 0x00000000},
+ {0x9058, 0x00000000},
+ {0x905c, 0x00000000},
+ {0x9060, 0x00000000},
+ {0x9064, 0x00000000},
+ {0x9068, 0x00000000},
+ {0x906c, 0x00000000},
+ {0x9070, 0x00000000},
+ {0x9074, 0x00000000},
+ {0x9078, 0x00000000},
+ {0x907c, 0x00000000},
+ {0x9080, 0x00000000},
+ {0x9084, 0x00000000},
+ {0x9088, 0x00000000},
+ {0x908c, 0x00000000},
+ {0x9090, 0x00000000},
+ {0x9094, 0x00000000},
+ {0x9098, 0x00000000},
+ {0x909c, 0x00000000},
+ {0x90a0, 0x00000000},
+ {0x90a4, 0x00000000},
+ {0x90a8, 0x00000000},
+ {0x90ac, 0x00000000},
+ {0x90b0, 0x00000000},
+ {0x90b4, 0x00000000},
+ {0x90b8, 0x00000000},
+ {0x90bc, 0x00000000},
+ {0x9100, 0x00000000},
+ {0x9104, 0x00000000},
+ {0x9108, 0x00000000},
+ {0x910c, 0x00000000},
+ {0x9110, 0x00000000},
+ {0x9114, 0x00000000},
+ {0x9118, 0x00000000},
+ {0x911c, 0x00000000},
+ {0x9120, 0x00000000},
+ {0x9124, 0x00000000},
+ {0x9128, 0x00000000},
+ {0x912c, 0x00000000},
+ {0x9130, 0x00000000},
+ {0x9134, 0x00000000},
+ {0x9138, 0x00000000},
+ {0x913c, 0x00000000},
+ {0x9140, 0x00000000},
+ {0x9144, 0x00000000},
+ {0x9148, 0x00000000},
+ {0x914c, 0x00000000},
+ {0x9150, 0x00000000},
+ {0x9154, 0x00000000},
+ {0x9158, 0x00000000},
+ {0x915c, 0x00000000},
+ {0x9160, 0x00000000},
+ {0x9164, 0x00000000},
+ {0x9168, 0x00000000},
+ {0x916c, 0x00000000},
+ {0x9170, 0x00000000},
+ {0x9174, 0x00000000},
+ {0x9178, 0x00000000},
+ {0x917c, 0x00000000},
+ {0x9180, 0x00000000},
+ {0x9184, 0x00000000},
+ {0x9188, 0x00000000},
+ {0x918c, 0x00000000},
+ {0x9190, 0x00000000},
+ {0x9194, 0x00000000},
+ {0x9198, 0x00000000},
+ {0x919c, 0x00000000},
+ {0x91a0, 0x00000000},
+ {0x91a4, 0x00000000},
+ {0x91a8, 0x00000000},
+ {0x91ac, 0x00000000},
+ {0x91b0, 0x00000000},
+ {0x91b4, 0x00000000},
+ {0x91b8, 0x00000000},
+ {0x91bc, 0x00000000},
+ {0x91c0, 0x00000000},
+ {0x91c4, 0x00000000},
+ {0x91c8, 0x00000000},
+ {0x91cc, 0x00000000},
+ {0x91d0, 0x00000000},
+ {0x91d4, 0x00000000},
+ {0x91d8, 0x00000000},
+ {0x91dc, 0x00000000},
+ {0x91e0, 0x00000000},
+ {0x91e4, 0x00000000},
+ {0x91e8, 0x00000000},
+ {0x91ec, 0x00000000},
+ {0x91f0, 0x00000000},
+ {0x91f4, 0x00000000},
+ {0x91f8, 0x00000000},
+ {0x91fc, 0x00000000},
+ {0x9200, 0x00000000},
+ {0x9204, 0x00000000},
+ {0x9208, 0x00000000},
+ {0x920c, 0x00000000},
+ {0x9210, 0x00000000},
+ {0x9214, 0x00000000},
+ {0x9218, 0x00000000},
+ {0x921c, 0x00000000},
+ {0x9220, 0x00000000},
+ {0x9224, 0x00000000},
+ {0x9228, 0x00000000},
+ {0x922c, 0x00000000},
+ {0x9230, 0x00000000},
+ {0x9234, 0x00000000},
+ {0x9238, 0x00000000},
+ {0x923c, 0x00000000},
+ {0x9240, 0x00000000},
+ {0x9244, 0x00000000},
+ {0x9248, 0x00000000},
+ {0x924c, 0x00000000},
+ {0x9250, 0x00000000},
+ {0x9254, 0x00000000},
+ {0x9258, 0x00000000},
+ {0x925c, 0x00000000},
+ {0x9260, 0x00000000},
+ {0x9264, 0x00000000},
+ {0x9268, 0x00000000},
+ {0x926c, 0x00000000},
+ {0x9270, 0x00000000},
+ {0x9274, 0x00000000},
+ {0x9278, 0x00000000},
+ {0x927c, 0x00000000},
+ {0x9280, 0x00000000},
+ {0x9284, 0x00000000},
+ {0x9288, 0x00000000},
+ {0x928c, 0x00000000},
+ {0x9290, 0x00000000},
+ {0x9294, 0x00000000},
+ {0x9298, 0x00000000},
+ {0x929c, 0x00000000},
+ {0x92a0, 0x00000000},
+ {0x92a4, 0x00000000},
+ {0x92a8, 0x00000000},
+ {0x92ac, 0x00000000},
+ {0x92b0, 0x00000000},
+ {0x92b4, 0x00000000},
+ {0x92b8, 0x00000000},
+ {0x92bc, 0x00000000},
+ {0x92c0, 0x00000000},
+ {0x92c4, 0x00000000},
+ {0x92c8, 0x00000000},
+ {0x92cc, 0x00000000},
+ {0x92d0, 0x00000000},
+ {0x92d4, 0x00000000},
+ {0x92d8, 0x00000000},
+ {0x92dc, 0x00000000},
+ {0x92e0, 0x00000000},
+ {0x92e4, 0x00000000},
+ {0x92e8, 0x00000000},
+ {0x92ec, 0x00000000},
+ {0x92f0, 0x00000000},
+ {0x92f4, 0x00000000},
+ {0x92f8, 0x00000000},
+ {0x92fc, 0x00000000},
+ {0x9300, 0x00000000},
+ {0x9304, 0x00000000},
+ {0x9308, 0x00000000},
+ {0x930c, 0x00000000},
+ {0x9310, 0x00000000},
+ {0x9314, 0x00000000},
+ {0x9318, 0x00000000},
+ {0x931c, 0x00000000},
+ {0x9320, 0x00000000},
+ {0x9324, 0x00000000},
+ {0x9328, 0x00000000},
+ {0x932c, 0x00000000},
+ {0x9330, 0x00000000},
+ {0x9334, 0x00000000},
+ {0x9338, 0x00000000},
+ {0x933c, 0x00000000},
+ {0x9340, 0x00000000},
+ {0x9344, 0x00000000},
+ {0x9348, 0x00000000},
+ {0x934c, 0x00000000},
+ {0x9350, 0x00000000},
+ {0x9354, 0x00000000},
+ {0x9358, 0x00000000},
+ {0x935c, 0x00000000},
+ {0x9360, 0x00000000},
+ {0x9364, 0x00000000},
+ {0x9368, 0x00000000},
+ {0x936c, 0x00000000},
+ {0x9370, 0x00000000},
+ {0x9374, 0x00000000},
+ {0x9378, 0x00000000},
+ {0x937c, 0x00000000},
+ {0x9380, 0x00000000},
+ {0x9384, 0x00000000},
+ {0x9388, 0x00000000},
+ {0x938c, 0x00000000},
+ {0x9390, 0x00000000},
+ {0x9394, 0x00000000},
+ {0x9398, 0x00000000},
+ {0x939c, 0x00000000},
+ {0x93a0, 0x00000000},
+ {0x93a4, 0x00000000},
+ {0x93a8, 0x00000000},
+ {0x93ac, 0x00000000},
+ {0x93b0, 0x00000000},
+ {0x93b4, 0x00000000},
+ {0x93b8, 0x00000000},
+ {0x93bc, 0x00000000},
+ {0x93c0, 0x00000000},
+ {0x93c4, 0x00000000},
+ {0x93c8, 0x00000000},
+ {0x93cc, 0x00000000},
+ {0x93d0, 0x00000000},
+ {0x93d4, 0x00000000},
+ {0x93d8, 0x00000000},
+ {0x93dc, 0x00000000},
+ {0x93e0, 0x00000000},
+ {0x93e4, 0x00000000},
+ {0x93e8, 0x00000000},
+ {0x93ec, 0x00000000},
+ {0x93f0, 0x00000000},
+ {0x93f4, 0x00000000},
+ {0x93f8, 0x00000000},
+ {0x93fc, 0x00000000},
+ {0x9400, 0x00000000},
+ {0x9404, 0x00000000},
+ {0x9408, 0x00000000},
+ {0x940c, 0x00000000},
+ {0x9410, 0x00000000},
+ {0x9414, 0x00000000},
+ {0x9418, 0x00000000},
+ {0x941c, 0x00000000},
+ {0x9420, 0x00000000},
+ {0x9424, 0x00000000},
+ {0x9428, 0x00000000},
+ {0x942c, 0x00000000},
+ {0x9430, 0x00000000},
+ {0x9434, 0x00000000},
+ {0x9438, 0x00000000},
+ {0x943c, 0x00000000},
+ {0x9440, 0x00000000},
+ {0x9444, 0x00000000},
+ {0x9448, 0x00000000},
+ {0x944c, 0x00000000},
+ {0x9450, 0x00000000},
+ {0x9454, 0x00000000},
+ {0x9458, 0x00000000},
+ {0x945c, 0x00000000},
+ {0x9460, 0x00000000},
+ {0x9464, 0x00000000},
+ {0x9468, 0x00000000},
+ {0x946c, 0x00000000},
+ {0x9470, 0x00000000},
+ {0x9474, 0x00000000},
+ {0x9478, 0x00000000},
+ {0x947c, 0x00000000},
+ {0x9480, 0x00000000},
+ {0x9484, 0x00000000},
+ {0x9488, 0x00000000},
+ {0x948c, 0x00000000},
+ {0x9490, 0x00000000},
+ {0x9494, 0x00000000},
+ {0x9498, 0x00000000},
+ {0x949c, 0x00000000},
+ {0x94a0, 0x00000000},
+ {0x94a4, 0x00000000},
+ {0x94a8, 0x00000000},
+ {0x94ac, 0x00000000},
+ {0x94b0, 0x00000000},
+ {0x94b4, 0x00000000},
+ {0x94b8, 0x00000000},
+ {0x94bc, 0x00000000},
+ {0xa220, 0x00000000},
+ {0xa224, 0x00000000},
+ {0xa228, 0x00000000},
+ {0xa22c, 0x00000000},
+ {0xa230, 0x00000000},
+ {0xa234, 0x00000000},
+ {0xa238, 0x00000000},
+ {0xa23c, 0x00000000},
+ {0xa240, 0x00000000},
+ {0xa244, 0x00000000},
+ {0xa248, 0x00000000},
+ {0xa24c, 0x00000000},
+ {0xa250, 0x00000000},
+ {0xa254, 0x00000000},
+ {0xa258, 0x00000000},
+ {0xa25c, 0x00000000},
+ {0xa260, 0x00000000},
+ {0xa264, 0x00000000},
+ {0xa268, 0x00000000},
+ {0xa26c, 0x00000000},
+ {0xa270, 0x00000000},
+ {0xa274, 0x00000000},
+ {0xa278, 0x00000000},
+ {0xa27c, 0x00000000},
+ {0xa280, 0x00000000},
+ {0xa284, 0x00000000},
+ {0xa288, 0x00000000},
+ {0xa28c, 0x00000000},
+ {0xa290, 0x00000000},
+ {0xa294, 0x00000000},
+ {0xa298, 0x00000000},
+ {0xa29c, 0x00000000},
+ {0xa2a0, 0x00000000},
+ {0xa2a4, 0x00000000},
+ {0xa2a8, 0x00000000},
+ {0xa2ac, 0x00000000},
+ {0xa2b0, 0x00000000},
+ {0xa2b4, 0x00000000},
+ {0xa2b8, 0x00000000},
+ {0xa2bc, 0x00000000},
+ {0xa2c0, 0x00000000},
+ {0xa2c4, 0x00000000},
+ {0xa2c8, 0x00000000},
+ {0xa2cc, 0x00000000},
+ {0xa2d0, 0x00000000},
+ {0xa2d4, 0x00000000},
+ {0xa2d8, 0x00000000},
+ {0xa2dc, 0x00000000},
+ {0xa2e0, 0x00000000},
+ {0xa2e4, 0x00000000},
+ {0xa2e8, 0x00000000},
+ {0xa2ec, 0x00000000},
+ {0xa2f0, 0x00000000},
+ {0xa2f4, 0x00000000},
+ {0xa2f8, 0x00000000},
+ {0xa2fc, 0x00000000},
+ {0xa300, 0x00000000},
+ {0xa304, 0x00000000},
+ {0xa308, 0x00000000},
+ {0xa30c, 0x00000000},
+ {0xa310, 0x00000000},
+ {0xa314, 0x00000000},
+ {0xa318, 0x00000000},
+ {0xa31c, 0x00000000},
+ {0xa320, 0x00000000},
+ {0xa324, 0x00000000},
+ {0xa328, 0x00000000},
+ {0xa32c, 0x00000000},
+ {0xa330, 0x00000000},
+ {0xa334, 0x00000000},
+ {0xa338, 0x00000000},
+ {0xa33c, 0x00000000},
+ {0xa340, 0x00000000},
+ {0xa344, 0x00000000},
+ {0xa348, 0x00000000},
+ {0xa34c, 0x00000000},
+ {0xa350, 0x00000000},
+ {0xa354, 0x00000000},
+ {0xa358, 0x00000000},
+ {0xa35c, 0x00000000},
+ {0xa360, 0x00000000},
+ {0xa364, 0x00000000},
+ {0xa368, 0x00000000},
+ {0xa36c, 0x00000000},
+ {0xa370, 0x00000000},
+ {0xa374, 0x00000000},
+ {0xa378, 0x00000000},
+ {0xa37c, 0x00000000},
+ {0xa380, 0x00000000},
+ {0xa384, 0x00000000},
+ {0xa388, 0x00000000},
+ {0xa38c, 0x00000000},
+ {0xa390, 0x00000000},
+ {0xa394, 0x00000000},
+ {0xa398, 0x00000000},
+ {0xa39c, 0x00000000},
+ {0xa3a0, 0x00000000},
+ {0xa3a4, 0x00000000},
+ {0xa3a8, 0x00000000},
+ {0xa3ac, 0x00000000},
+ {0xa3b0, 0x00000000},
+ {0xa3b4, 0x00000000},
+ {0xa3b8, 0x00000000},
+ {0xa3bc, 0x00000000},
+ {0xa620, 0x00000000},
+ {0xa624, 0x00000000},
+ {0xa628, 0x00000000},
+ {0xa62c, 0x00000000},
+ {0xa630, 0x00000000},
+ {0xa634, 0x00000000},
+ {0xa638, 0x00000000},
+ {0xa63c, 0x00000000},
+ {0xa640, 0x00000000},
+ {0xa644, 0x00000000},
+ {0xa648, 0x00000000},
+ {0xa64c, 0x00000000},
+ {0xa650, 0x00000000},
+ {0xa654, 0x00000000},
+ {0xa658, 0x00000000},
+ {0xa65c, 0x00000000},
+ {0xa660, 0x00000000},
+ {0xa664, 0x00000000},
+ {0xa668, 0x00000000},
+ {0xa66c, 0x00000000},
+ {0xa670, 0x00000000},
+ {0xa674, 0x00000000},
+ {0xa678, 0x00000000},
+ {0xa67c, 0x00000000},
+ {0xa680, 0x00000000},
+ {0xa684, 0x00000000},
+ {0xa688, 0x00000000},
+ {0xa68c, 0x00000000},
+ {0xa690, 0x00000000},
+ {0xa694, 0x00000000},
+ {0xa698, 0x00000000},
+ {0xa69c, 0x00000000},
+ {0xa6a0, 0x00000000},
+ {0xa6a4, 0x00000000},
+ {0xa6a8, 0x00000000},
+ {0xa6ac, 0x00000000},
+ {0xa6b0, 0x00000000},
+ {0xa6b4, 0x00000000},
+ {0xa6b8, 0x00000000},
+ {0xa6bc, 0x00000000},
+ {0xa6c0, 0x00000000},
+ {0xa6c4, 0x00000000},
+ {0xa6c8, 0x00000000},
+ {0xa6cc, 0x00000000},
+ {0xa6d0, 0x00000000},
+ {0xa6d4, 0x00000000},
+ {0xa6d8, 0x00000000},
+ {0xa6dc, 0x00000000},
+ {0xa6e0, 0x00000000},
+ {0xa6e4, 0x00000000},
+ {0xa6e8, 0x00000000},
+ {0xa6ec, 0x00000000},
+ {0xa6f0, 0x00000000},
+ {0xa6f4, 0x00000000},
+ {0xa6f8, 0x00000000},
+ {0xa6fc, 0x00000000},
+ {0xa700, 0x00000000},
+ {0xa704, 0x00000000},
+ {0xa708, 0x00000000},
+ {0xa70c, 0x00000000},
+ {0xa710, 0x00000000},
+ {0xa714, 0x00000000},
+ {0xa718, 0x00000000},
+ {0xa71c, 0x00000000},
+ {0xa720, 0x00000000},
+ {0xa724, 0x00000000},
+ {0xa728, 0x00000000},
+ {0xa72c, 0x00000000},
+ {0xa730, 0x00000000},
+ {0xa734, 0x00000000},
+ {0xa738, 0x00000000},
+ {0xa73c, 0x00000000},
+ {0xa740, 0x00000000},
+ {0xa744, 0x00000000},
+ {0xa748, 0x00000000},
+ {0xa74c, 0x00000000},
+ {0xa750, 0x00000000},
+ {0xa754, 0x00000000},
+ {0xa758, 0x00000000},
+ {0xa75c, 0x00000000},
+ {0xa760, 0x00000000},
+ {0xa764, 0x00000000},
+ {0xa768, 0x00000000},
+ {0xa76c, 0x00000000},
+ {0xa770, 0x00000000},
+ {0xa774, 0x00000000},
+ {0xa778, 0x00000000},
+ {0xa77c, 0x00000000},
+ {0xa780, 0x00000000},
+ {0xa784, 0x00000000},
+ {0xa788, 0x00000000},
+ {0xa78c, 0x00000000},
+ {0xa790, 0x00000000},
+ {0xa794, 0x00000000},
+ {0xa798, 0x00000000},
+ {0xa79c, 0x00000000},
+ {0xa7a0, 0x00000000},
+ {0xa7a4, 0x00000000},
+ {0xa7a8, 0x00000000},
+ {0xa7ac, 0x00000000},
+ {0xa7b0, 0x00000000},
+ {0xa7b4, 0x00000000},
+ {0xa7b8, 0x00000000},
+ {0xa7bc, 0x00000000},
+ {0x81d8, 0x00000000},
+ {0x82d8, 0x00000000},
+ {0x9f04, 0x2b251f19},
+ {0x9f08, 0x433d3731},
+ {0x9f0c, 0x5b554f49},
+ {0x9f10, 0x736d6761},
+ {0x9f14, 0x7f7f7f79},
+ {0x9f18, 0x120f7f7f},
+ {0x9f1c, 0x1e1b1815},
+ {0x9f20, 0x2a272421},
+ {0x9f24, 0x3633302d},
+ {0x9f28, 0x3f3f3c39},
+ {0x9f2c, 0x3f3f3f3f},
+ {0x8008, 0x00000080},
+ {0x8088, 0x807f030a},
+ {0x80c8, 0x708f0bf1},
+ {0x80c8, 0x708e0aa5},
+ {0x80c8, 0x708d097d},
+ {0x80c8, 0x708c0875},
+ {0x80c8, 0x708b0789},
+ {0x80c8, 0x708a06b7},
+ {0x80c8, 0x708905fc},
+ {0x80c8, 0x70880556},
+ {0x80c8, 0x708704c1},
+ {0x80c8, 0x7086043d},
+ {0x80c8, 0x708503c7},
+ {0x80c8, 0x7084035e},
+ {0x80c8, 0x708302ac},
+ {0x80c8, 0x70820262},
+ {0x80c8, 0x70810220},
+ {0x80c8, 0x70800000},
+ {0x80c8, 0x7090011f},
+ {0x80c8, 0x7010011f},
+ {0x8088, 0x80000000},
+ {0x8008, 0x00000000},
+ {0x8088, 0x00000110},
+ {0x8000, 0x00000008},
+ {0x8080, 0x00000005},
+ {0x8500, 0x80000008},
+ {0x8504, 0x43000004},
+ {0x8508, 0x4b044a00},
+ {0x850c, 0x40098604},
+ {0x8510, 0x0004e020},
+ {0x8514, 0x87044b05},
+ {0x8518, 0xe020400b},
+ {0x851c, 0x4b000004},
+ {0x8520, 0x21e07410},
+ {0x8524, 0x74300000},
+ {0x8528, 0x43800004},
+ {0x852c, 0x4c000007},
+ {0x8530, 0x43000004},
+ {0x8534, 0x42fe5700},
+ {0x8538, 0x42004000},
+ {0x853c, 0x30005055},
+ {0x8540, 0xa50fb41a},
+ {0x8544, 0xf11ce3c7},
+ {0x8548, 0xf31cf21c},
+ {0x854c, 0xf61cf41c},
+ {0x8550, 0xf91cf81c},
+ {0x8554, 0xfb1cfa1c},
+ {0x8558, 0xfd1cfc1c},
+ {0x855c, 0xff1cfe1c},
+ {0x8560, 0xf11cf01c},
+ {0x8564, 0xf31cf21c},
+ {0x8568, 0xf51cf41c},
+ {0x856c, 0xf71cf61c},
+ {0x8570, 0xf91cf81c},
+ {0x8574, 0xe3c7a504},
+ {0x8578, 0xf11af01a},
+ {0x857c, 0x30580001},
+ {0x8580, 0x30b030c9},
+ {0x8584, 0x30ff30fc},
+ {0x8588, 0x310f3102},
+ {0x858c, 0x3148311c},
+ {0x8590, 0x31603158},
+ {0x8594, 0x30c7320e},
+ {0x8598, 0x32293225},
+ {0x859c, 0x32433242},
+ {0x85a0, 0x3286327a},
+ {0x85a4, 0x329d328a},
+ {0x85a8, 0x32aa32a8},
+ {0x85ac, 0x320331c5},
+ {0x85b0, 0x7410e2c1},
+ {0x85b4, 0x020020a8},
+ {0x85b8, 0x2098140f},
+ {0x85bc, 0x140f0200},
+ {0x85c0, 0x02002088},
+ {0x85c4, 0x7430140f},
+ {0x85c8, 0x5b10e31c},
+ {0x85cc, 0x20a87410},
+ {0x85d0, 0x140f0201},
+ {0x85d4, 0x00002080},
+ {0x85d8, 0x5507140f},
+ {0x85dc, 0x5c065661},
+ {0x85e0, 0x7410e308},
+ {0x85e4, 0x02002088},
+ {0x85e8, 0x5517140f},
+ {0x85ec, 0x7410e308},
+ {0x85f0, 0x020020a8},
+ {0x85f4, 0x5517140f},
+ {0x85f8, 0x5c025641},
+ {0x85fc, 0x7410e308},
+ {0x8600, 0x00002080},
+ {0x8604, 0x1407140f},
+ {0x8608, 0xe3085507},
+ {0x860c, 0x7508e2b4},
+ {0x8610, 0xe312468e},
+ {0x8614, 0x5b10e0f4},
+ {0x8618, 0x20a87410},
+ {0x861c, 0x140f0201},
+ {0x8620, 0x00002090},
+ {0x8624, 0x5507140f},
+ {0x8628, 0x5c065661},
+ {0x862c, 0x7410e308},
+ {0x8630, 0x02002098},
+ {0x8634, 0x5517140f},
+ {0x8638, 0x7410e308},
+ {0x863c, 0x020020a8},
+ {0x8640, 0x5517140f},
+ {0x8644, 0x5c025641},
+ {0x8648, 0x7410e308},
+ {0x864c, 0x00002090},
+ {0x8650, 0x5507140f},
+ {0x8654, 0x7509e308},
+ {0x8658, 0xe3124696},
+ {0x865c, 0x0001e0f4},
+ {0x8660, 0x74105b10},
+ {0x8664, 0x000020a0},
+ {0x8668, 0x5507140f},
+ {0x866c, 0xe3085601},
+ {0x8670, 0x20a87410},
+ {0x8674, 0x140f0200},
+ {0x8678, 0xe3085517},
+ {0x867c, 0x750ae2b4},
+ {0x8680, 0xe3124686},
+ {0x8684, 0x5500e0f4},
+ {0x8688, 0x5501e304},
+ {0x868c, 0xe2c10001},
+ {0x8690, 0x5b10e31c},
+ {0x8694, 0x20807410},
+ {0x8698, 0x140f0000},
+ {0x869c, 0x02002098},
+ {0x86a0, 0xf204140f},
+ {0x86a4, 0x020020a8},
+ {0x86a8, 0x5507140f},
+ {0x86ac, 0xe3085601},
+ {0x86b0, 0x20887410},
+ {0x86b4, 0x140f0200},
+ {0x86b8, 0xe3085517},
+ {0x86bc, 0x7508e2b4},
+ {0x86c0, 0xe312468e},
+ {0x86c4, 0x7410e0f4},
+ {0x86c8, 0x00002090},
+ {0x86cc, 0x5507140f},
+ {0x86d0, 0x7410e308},
+ {0x86d4, 0x02002098},
+ {0x86d8, 0x5517140f},
+ {0x86dc, 0x7509e308},
+ {0x86e0, 0xe3124696},
+ {0x86e4, 0x0001e0f4},
+ {0x86e8, 0x74207900},
+ {0x86ec, 0x57005710},
+ {0x86f0, 0x9700140f},
+ {0x86f4, 0x00017430},
+ {0x86f8, 0xe31ce2c1},
+ {0x86fc, 0xe2ca0001},
+ {0x8700, 0x0001e34b},
+ {0x8704, 0x312ae2c1},
+ {0x8708, 0xe3ba0023},
+ {0x870c, 0x54ed0002},
+ {0x8710, 0x00230baa},
+ {0x8714, 0x0002e3ba},
+ {0x8718, 0xe2b9e367},
+ {0x871c, 0xe2c10001},
+ {0x8720, 0x00223125},
+ {0x8724, 0x0002e3ba},
+ {0x8728, 0x0baa54ec},
+ {0x872c, 0xe3ba0022},
+ {0x8730, 0xe3670002},
+ {0x8734, 0x0001e2b9},
+ {0x8738, 0x0baae2c1},
+ {0x873c, 0x6d0f6c67},
+ {0x8740, 0xe3bae31c},
+ {0x8744, 0xe31c6c8b},
+ {0x8748, 0x0bace3ba},
+ {0x874c, 0x6d0f6cb3},
+ {0x8750, 0xe3bae31c},
+ {0x8754, 0x6cdb0bad},
+ {0x8758, 0xe31c6d0f},
+ {0x875c, 0x6cf7e3ba},
+ {0x8760, 0xe31c6d0f},
+ {0x8764, 0x6c09e3ba},
+ {0x8768, 0xe31c6d00},
+ {0x876c, 0x6c25e3ba},
+ {0x8770, 0xe3bae31c},
+ {0x8774, 0x6c4df8ca},
+ {0x8778, 0xe3bae31c},
+ {0x877c, 0x6c75f9d3},
+ {0x8780, 0xe3bae31c},
+ {0x8784, 0xe31c6c99},
+ {0x8788, 0xe367e3ba},
+ {0x878c, 0x0001e2b9},
+ {0x8790, 0x4380e2ca},
+ {0x8794, 0x43006344},
+ {0x8798, 0x00223188},
+ {0x879c, 0x0002e3bf},
+ {0x87a0, 0x0baa54ec},
+ {0x87a4, 0xe3bf0022},
+ {0x87a8, 0xe3670002},
+ {0x87ac, 0x0001e2c5},
+ {0x87b0, 0x4380e2ca},
+ {0x87b4, 0x43006344},
+ {0x87b8, 0xe367317b},
+ {0x87bc, 0x0001e2c5},
+ {0x87c0, 0x4380e2ca},
+ {0x87c4, 0x4300634d},
+ {0x87c8, 0x74100ba6},
+ {0x87cc, 0x000921e8},
+ {0x87d0, 0x6f0f6e67},
+ {0x87d4, 0xe3bfe34b},
+ {0x87d8, 0x000a21e8},
+ {0x87dc, 0xe34b6e77},
+ {0x87e0, 0x21e8e3bf},
+ {0x87e4, 0x6e8b000b},
+ {0x87e8, 0xe3bfe34b},
+ {0x87ec, 0x000c21e8},
+ {0x87f0, 0xe34b6e9f},
+ {0x87f4, 0x0baae3bf},
+ {0x87f8, 0x21e87410},
+ {0x87fc, 0x6eb3000d},
+ {0x8800, 0xe34b6f0f},
+ {0x8804, 0x21e8e3bf},
+ {0x8808, 0x6ec7000e},
+ {0x880c, 0xe3bfe34b},
+ {0x8810, 0x74100bac},
+ {0x8814, 0x000f21e8},
+ {0x8818, 0x6f0f6edb},
+ {0x881c, 0xe3bfe34b},
+ {0x8820, 0x001021e8},
+ {0x8824, 0xe34b6eef},
+ {0x8828, 0xe3bfe3bf},
+ {0x882c, 0x001321e8},
+ {0x8830, 0x6f006e11},
+ {0x8834, 0xe3bfe34b},
+ {0x8838, 0x21e8e3bf},
+ {0x883c, 0x6e250014},
+ {0x8840, 0xe3bfe34b},
+ {0x8844, 0x21e8fbab},
+ {0x8848, 0x6e390015},
+ {0x884c, 0xe3bfe34b},
+ {0x8850, 0x001621e8},
+ {0x8854, 0xe34b6e4d},
+ {0x8858, 0xfcb0e3bf},
+ {0x885c, 0x001721e8},
+ {0x8860, 0xe34b6e61},
+ {0x8864, 0x21e8e3bf},
+ {0x8868, 0x6e750018},
+ {0x886c, 0xe3bfe34b},
+ {0x8870, 0x001921e8},
+ {0x8874, 0xe34b6e89},
+ {0x8878, 0x21e8e3bf},
+ {0x887c, 0x6e99001a},
+ {0x8880, 0xe3bfe34b},
+ {0x8884, 0xe2c5e367},
+ {0x8888, 0x00040001},
+ {0x888c, 0x42fc0004},
+ {0x8890, 0x60010007},
+ {0x8894, 0x42000004},
+ {0x8898, 0x62200007},
+ {0x889c, 0x00046200},
+ {0x88a0, 0x5b005501},
+ {0x88a4, 0x5b40e304},
+ {0x88a8, 0x00076605},
+ {0x88ac, 0x63006200},
+ {0x88b0, 0x0004e388},
+ {0x88b4, 0x0a010900},
+ {0x88b8, 0x0d000b40},
+ {0x88bc, 0x00320e01},
+ {0x88c0, 0x95090004},
+ {0x88c4, 0x790442fb},
+ {0x88c8, 0x43804200},
+ {0x88cc, 0x4d010007},
+ {0x88d0, 0x43000004},
+ {0x88d4, 0x05620007},
+ {0x88d8, 0x961d05a3},
+ {0x88dc, 0x0004e388},
+ {0x88e0, 0x0007e304},
+ {0x88e4, 0x07a306a2},
+ {0x88e8, 0x0004e388},
+ {0x88ec, 0xe378e304},
+ {0x88f0, 0xe3800002},
+ {0x88f4, 0x00074380},
+ {0x88f8, 0x00044d00},
+ {0x88fc, 0x42fe4300},
+ {0x8900, 0x42007900},
+ {0x8904, 0x00040001},
+ {0x8908, 0x000742fc},
+ {0x890c, 0x00046003},
+ {0x8910, 0x31cc4200},
+ {0x8914, 0x06a20007},
+ {0x8918, 0x31f807a3},
+ {0x891c, 0x77000005},
+ {0x8920, 0x52000007},
+ {0x8924, 0x42fe0004},
+ {0x8928, 0x60000007},
+ {0x892c, 0x42000004},
+ {0x8930, 0x60004380},
+ {0x8934, 0x62016100},
+ {0x8938, 0x00056310},
+ {0x893c, 0x55004100},
+ {0x8940, 0x5c020007},
+ {0x8944, 0x43000004},
+ {0x8948, 0xe2d70001},
+ {0x894c, 0x73000005},
+ {0x8950, 0xe2d70001},
+ {0x8954, 0x5d000006},
+ {0x8958, 0x42f70004},
+ {0x895c, 0x6c000005},
+ {0x8960, 0x42000004},
+ {0x8964, 0x0004e2de},
+ {0x8968, 0x00074380},
+ {0x896c, 0x4a004e00},
+ {0x8970, 0x00064c00},
+ {0x8974, 0x60007f00},
+ {0x8978, 0x00046f00},
+ {0x897c, 0x00054300},
+ {0x8980, 0x00017300},
+ {0x8984, 0xe2d70001},
+ {0x8988, 0x5d010006},
+ {0x898c, 0x61006002},
+ {0x8990, 0x00055601},
+ {0x8994, 0xe2e27710},
+ {0x8998, 0x73000005},
+ {0x899c, 0x43800004},
+ {0x89a0, 0x5e010007},
+ {0x89a4, 0x4d205e00},
+ {0x89a8, 0x4a084e20},
+ {0x89ac, 0x4c3f4960},
+ {0x89b0, 0x00064301},
+ {0x89b4, 0x63807f01},
+ {0x89b8, 0x00046010},
+ {0x89bc, 0x00064300},
+ {0x89c0, 0x00077402},
+ {0x89c4, 0x40004001},
+ {0x89c8, 0x0006ab00},
+ {0x89cc, 0x00077404},
+ {0x89d0, 0x40004001},
+ {0x89d4, 0x0004ab00},
+ {0x89d8, 0x00074380},
+ {0x89dc, 0x4e004d00},
+ {0x89e0, 0x4c004a00},
+ {0x89e4, 0x00064300},
+ {0x89e8, 0x63007f00},
+ {0x89ec, 0x00046000},
+ {0x89f0, 0x00014300},
+ {0x89f4, 0x73800005},
+ {0x89f8, 0x42fe0004},
+ {0x89fc, 0x6c010005},
+ {0x8a00, 0x000514c8},
+ {0x8a04, 0x00046c00},
+ {0x8a08, 0x00014200},
+ {0x8a0c, 0x0005e2ce},
+ {0x8a10, 0x00017300},
+ {0x8a14, 0x00040006},
+ {0x8a18, 0x42fa4380},
+ {0x8a1c, 0x42007c05},
+ {0x8a20, 0x7c5b0006},
+ {0x8a24, 0x7e5b7d5b},
+ {0x8a28, 0x00077f00},
+ {0x8a2c, 0x415b405b},
+ {0x8a30, 0x4300425b},
+ {0x8a34, 0x43000004},
+ {0x8a38, 0x00040001},
+ {0x8a3c, 0x60004380},
+ {0x8a40, 0x62016100},
+ {0x8a44, 0x42fa6310},
+ {0x8a48, 0x42007c00},
+ {0x8a4c, 0x00014300},
+ {0x8a50, 0x0001e2e5},
+ {0x8a54, 0x55000007},
+ {0x8a58, 0x74200004},
+ {0x8a5c, 0x79017711},
+ {0x8a60, 0x57005710},
+ {0x8a64, 0x00019700},
+ {0x8a68, 0x4e004f02},
+ {0x8a6c, 0x52015302},
+ {0x8a70, 0x43800001},
+ {0x8a74, 0x78006505},
+ {0x8a78, 0x7a007900},
+ {0x8a7c, 0x43007b00},
+ {0x8a80, 0x43800001},
+ {0x8a84, 0x43006500},
+ {0x8a88, 0x43800001},
+ {0x8a8c, 0x7c006405},
+ {0x8a90, 0x00014300},
+ {0x8a94, 0x64004380},
+ {0x8a98, 0x00014300},
+ {0x8a9c, 0x74200004},
+ {0x8aa0, 0x0005e392},
+ {0x8aa4, 0x73807388},
+ {0x8aa8, 0xe3a08f00},
+ {0x8aac, 0xe3920001},
+ {0x8ab0, 0x73810005},
+ {0x8ab4, 0x93007380},
+ {0x8ab8, 0x0001e3a0},
+ {0x8abc, 0xe2e5e3a7},
+ {0x8ac0, 0x0001e3ae},
+ {0x8ac4, 0xe3aee3a7},
+ {0x8ac8, 0x00040001},
+ {0x8acc, 0x24207410},
+ {0x8ad0, 0x14c80000},
+ {0x8ad4, 0x00002428},
+ {0x8ad8, 0x1a4215f4},
+ {0x8adc, 0x74300008},
+ {0x8ae0, 0x43800001},
+ {0x8ae4, 0x7a907b48},
+ {0x8ae8, 0x78027900},
+ {0x8aec, 0x55034300},
+ {0x8af0, 0x43803308},
+ {0x8af4, 0x7a807b38},
+ {0x8af8, 0x55134300},
+ {0x8afc, 0x43803308},
+ {0x8b00, 0x7a007b40},
+ {0x8b04, 0x55234300},
+ {0x8b08, 0x74007401},
+ {0x8b0c, 0x00018e00},
+ {0x8b10, 0x52300007},
+ {0x8b14, 0x74310004},
+ {0x8b18, 0x8e007430},
+ {0x8b1c, 0x52200007},
+ {0x8b20, 0x00010004},
+ {0x8b24, 0x57005702},
+ {0x8b28, 0x00018e00},
+ {0x8b2c, 0x561042ef},
+ {0x8b30, 0x42005600},
+ {0x8b34, 0x00018c00},
+ {0x8b38, 0x4e004f78},
+ {0x8b3c, 0x52015388},
+ {0x8b40, 0xe32b5b20},
+ {0x8b44, 0x54005480},
+ {0x8b48, 0x54005481},
+ {0x8b4c, 0x54005482},
+ {0x8b50, 0xbf1de336},
+ {0x8b54, 0xe2f13010},
+ {0x8b58, 0xe2ffe2f9},
+ {0x8b5c, 0xe3b3e312},
+ {0x8b60, 0xe3085523},
+ {0x8b64, 0xe3125525},
+ {0x8b68, 0x0001e3b3},
+ {0x8b6c, 0x54c054bf},
+ {0x8b70, 0x54c154a3},
+ {0x8b74, 0x4c1854a4},
+ {0x8b78, 0x54c2bf07},
+ {0x8b7c, 0xbf0454a4},
+ {0x8b80, 0x54a354c1},
+ {0x8b84, 0xe3c4bf01},
+ {0x8b88, 0x000154df},
+ {0x8b8c, 0x54e554bf},
+ {0x8b90, 0x54df050a},
+ {0x8b94, 0x16570001},
+ {0x8b98, 0x74307b80},
+ {0x8b9c, 0x7f404380},
+ {0x8ba0, 0x7d007e00},
+ {0x8ba4, 0x43007c02},
+ {0x8ba8, 0x55015b40},
+ {0x8bac, 0xe3165c01},
+ {0x8bb0, 0x54005480},
+ {0x8bb4, 0x54005481},
+ {0x8bb8, 0x54005482},
+ {0x8bbc, 0x74107b00},
+ {0x8bc0, 0xbfe5e336},
+ {0x8bc4, 0x56103010},
+ {0x8bc8, 0x8c005600},
+ {0x8bcc, 0x57040001},
+ {0x8bd0, 0x8e005700},
+ {0x8bd4, 0x57005708},
+ {0x8bd8, 0x57818e00},
+ {0x8bdc, 0x8e005780},
+ {0x8be0, 0x00074380},
+ {0x8be4, 0x5c005c01},
+ {0x8be8, 0x00041403},
+ {0x8bec, 0x00014300},
+ {0x8bf0, 0x0007427f},
+ {0x8bf4, 0x62006280},
+ {0x8bf8, 0x00049200},
+ {0x8bfc, 0x00014200},
+ {0x8c00, 0x0007427f},
+ {0x8c04, 0x63146394},
+ {0x8c08, 0x00049200},
+ {0x8c0c, 0x00014200},
+ {0x8c10, 0x42fe0004},
+ {0x8c14, 0x42007901},
+ {0x8c18, 0x14037420},
+ {0x8c1c, 0x57005710},
+ {0x8c20, 0x0001140f},
+ {0x8c24, 0x56010006},
+ {0x8c28, 0x54005502},
+ {0x8c2c, 0x7f000005},
+ {0x8c30, 0x77107e12},
+ {0x8c34, 0x75007600},
+ {0x8c38, 0x00047400},
+ {0x8c3c, 0x00014270},
+ {0x8c40, 0x42000004},
+ {0x8c44, 0x77000005},
+ {0x8c48, 0x56000006},
+ {0x8c4c, 0x00060001},
+ {0x8c50, 0x5f005f80},
+ {0x8c54, 0x00059900},
+ {0x8c58, 0x00017300},
+ {0x8c5c, 0x63800006},
+ {0x8c60, 0x98006300},
+ {0x8c64, 0x549f0001},
+ {0x8c68, 0x5c015400},
+ {0x8c6c, 0x540054df},
+ {0x8c70, 0x00015c02},
+ {0x8c74, 0x07145c01},
+ {0x8c78, 0x5c025400},
+ {0x8c7c, 0x5c020001},
+ {0x8c80, 0x54000714},
+ {0x8c84, 0x00015c01},
+ {0x8c88, 0x4c184c98},
+ {0x8c8c, 0x00040001},
+ {0x8c90, 0x74305c02},
+ {0x8c94, 0x0c010901},
+ {0x8c98, 0x00050ba6},
+ {0x8c9c, 0x00077780},
+ {0x8ca0, 0x00045220},
+ {0x8ca4, 0x60084380},
+ {0x8ca8, 0x6200610a},
+ {0x8cac, 0x000763ce},
+ {0x8cb0, 0x00045c00},
+ {0x8cb4, 0x00014300},
+ {0x8080, 0x00000004},
+ {0x8080, 0x00000000},
+ {0x8088, 0x00000000},
+};
+
+static const struct rtw89_txpwr_byrate_cfg rtw89_8852b_txpwr_byrate[] = {
+ { 0, 0, 0, 0, 4, 0x50505050, },
+ { 0, 0, 1, 0, 4, 0x50505050, },
+ { 0, 0, 1, 4, 4, 0x484c5050, },
+ { 0, 0, 2, 0, 4, 0x50505050, },
+ { 0, 0, 2, 4, 4, 0x44484c50, },
+ { 0, 0, 2, 8, 4, 0x34383c40, },
+ { 0, 0, 3, 0, 4, 0x50505050, },
+ { 0, 1, 2, 0, 4, 0x50505050, },
+ { 0, 1, 2, 4, 4, 0x44484c50, },
+ { 0, 1, 2, 8, 4, 0x34383c40, },
+ { 0, 1, 3, 0, 4, 0x50505050, },
+ { 0, 0, 4, 1, 4, 0x00000000, },
+ { 0, 0, 4, 0, 1, 0x00000000, },
+ { 1, 0, 1, 0, 4, 0x50505050, },
+ { 1, 0, 1, 4, 4, 0x484c5050, },
+ { 1, 0, 2, 0, 4, 0x50505050, },
+ { 1, 0, 2, 4, 4, 0x44484c50, },
+ { 1, 0, 2, 8, 4, 0x34383c40, },
+ { 1, 0, 3, 0, 4, 0x50505050, },
+ { 1, 1, 2, 0, 4, 0x50505050, },
+ { 1, 1, 2, 4, 4, 0x44484c50, },
+ { 1, 1, 2, 8, 4, 0x34383c40, },
+ { 1, 1, 3, 0, 4, 0x50505050, },
+ { 1, 0, 4, 0, 4, 0x00000000, },
+};
+
+static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8},
+ {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8},
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8},
+ {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8},
+ {0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5,
+ 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4},
+ {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3},
+ {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
+ 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7},
+ {0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5,
+ 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
+};
+
+static const s8 _txpwr_track_delta_swingidx_2gb_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2};
+
+static const s8 _txpwr_track_delta_swingidx_2gb_p[] = {
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6};
+
+static const s8 _txpwr_track_delta_swingidx_2ga_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+static const s8 _txpwr_track_delta_swingidx_2ga_p[] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2,
+ -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+
+const u8 rtw89_8852b_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
+ [RTW89_REGD_NUM] = {
+ [0][0][RTW89_ACMA] = 0,
+ [0][0][RTW89_CHILE] = 0,
+ [0][0][RTW89_CN] = 0,
+ [0][0][RTW89_ETSI] = 0,
+ [0][0][RTW89_FCC] = 1,
+ [0][0][RTW89_IC] = 1,
+ [0][0][RTW89_KCC] = 0,
+ [0][0][RTW89_MEXICO] = 1,
+ [0][0][RTW89_MKK] = 0,
+ [0][0][RTW89_QATAR] = 0,
+ [0][0][RTW89_UK] = 0,
+ [0][0][RTW89_UKRAINE] = 0,
+ [0][1][RTW89_ACMA] = 0,
+ [0][1][RTW89_CHILE] = 0,
+ [0][1][RTW89_CN] = 0,
+ [0][1][RTW89_ETSI] = 0,
+ [0][1][RTW89_FCC] = 3,
+ [0][1][RTW89_IC] = 3,
+ [0][1][RTW89_KCC] = 0,
+ [0][1][RTW89_MEXICO] = 3,
+ [0][1][RTW89_MKK] = 0,
+ [0][1][RTW89_QATAR] = 0,
+ [0][1][RTW89_UK] = 0,
+ [0][1][RTW89_UKRAINE] = 0,
+ [1][1][RTW89_ACMA] = 0,
+ [1][1][RTW89_CHILE] = 0,
+ [1][1][RTW89_CN] = 0,
+ [1][1][RTW89_ETSI] = 0,
+ [1][1][RTW89_FCC] = 3,
+ [1][1][RTW89_IC] = 3,
+ [1][1][RTW89_KCC] = 0,
+ [1][1][RTW89_MEXICO] = 3,
+ [1][1][RTW89_MKK] = 0,
+ [1][1][RTW89_QATAR] = 0,
+ [1][1][RTW89_UK] = 0,
+ [1][1][RTW89_UKRAINE] = 0,
+};
+
+const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][0][0][RTW89_WW][0] = 58,
+ [0][0][0][0][RTW89_WW][1] = 58,
+ [0][0][0][0][RTW89_WW][2] = 58,
+ [0][0][0][0][RTW89_WW][3] = 58,
+ [0][0][0][0][RTW89_WW][4] = 58,
+ [0][0][0][0][RTW89_WW][5] = 58,
+ [0][0][0][0][RTW89_WW][6] = 58,
+ [0][0][0][0][RTW89_WW][7] = 58,
+ [0][0][0][0][RTW89_WW][8] = 58,
+ [0][0][0][0][RTW89_WW][9] = 58,
+ [0][0][0][0][RTW89_WW][10] = 58,
+ [0][0][0][0][RTW89_WW][11] = 58,
+ [0][0][0][0][RTW89_WW][12] = 56,
+ [0][0][0][0][RTW89_WW][13] = 76,
+ [0][1][0][0][RTW89_WW][0] = 46,
+ [0][1][0][0][RTW89_WW][1] = 46,
+ [0][1][0][0][RTW89_WW][2] = 46,
+ [0][1][0][0][RTW89_WW][3] = 46,
+ [0][1][0][0][RTW89_WW][4] = 46,
+ [0][1][0][0][RTW89_WW][5] = 46,
+ [0][1][0][0][RTW89_WW][6] = 46,
+ [0][1][0][0][RTW89_WW][7] = 46,
+ [0][1][0][0][RTW89_WW][8] = 46,
+ [0][1][0][0][RTW89_WW][9] = 46,
+ [0][1][0][0][RTW89_WW][10] = 46,
+ [0][1][0][0][RTW89_WW][11] = 46,
+ [0][1][0][0][RTW89_WW][12] = 42,
+ [0][1][0][0][RTW89_WW][13] = 64,
+ [1][0][0][0][RTW89_WW][0] = 0,
+ [1][0][0][0][RTW89_WW][1] = 0,
+ [1][0][0][0][RTW89_WW][2] = 50,
+ [1][0][0][0][RTW89_WW][3] = 50,
+ [1][0][0][0][RTW89_WW][4] = 50,
+ [1][0][0][0][RTW89_WW][5] = 58,
+ [1][0][0][0][RTW89_WW][6] = 50,
+ [1][0][0][0][RTW89_WW][7] = 50,
+ [1][0][0][0][RTW89_WW][8] = 50,
+ [1][0][0][0][RTW89_WW][9] = 42,
+ [1][0][0][0][RTW89_WW][10] = 30,
+ [1][0][0][0][RTW89_WW][11] = 0,
+ [1][0][0][0][RTW89_WW][12] = 0,
+ [1][0][0][0][RTW89_WW][13] = 0,
+ [1][1][0][0][RTW89_WW][0] = 0,
+ [1][1][0][0][RTW89_WW][1] = 0,
+ [1][1][0][0][RTW89_WW][2] = 46,
+ [1][1][0][0][RTW89_WW][3] = 46,
+ [1][1][0][0][RTW89_WW][4] = 46,
+ [1][1][0][0][RTW89_WW][5] = 46,
+ [1][1][0][0][RTW89_WW][6] = 34,
+ [1][1][0][0][RTW89_WW][7] = 34,
+ [1][1][0][0][RTW89_WW][8] = 34,
+ [1][1][0][0][RTW89_WW][9] = 30,
+ [1][1][0][0][RTW89_WW][10] = 30,
+ [1][1][0][0][RTW89_WW][11] = 0,
+ [1][1][0][0][RTW89_WW][12] = 0,
+ [1][1][0][0][RTW89_WW][13] = 0,
+ [0][0][1][0][RTW89_WW][0] = 58,
+ [0][0][1][0][RTW89_WW][1] = 58,
+ [0][0][1][0][RTW89_WW][2] = 58,
+ [0][0][1][0][RTW89_WW][3] = 58,
+ [0][0][1][0][RTW89_WW][4] = 58,
+ [0][0][1][0][RTW89_WW][5] = 58,
+ [0][0][1][0][RTW89_WW][6] = 58,
+ [0][0][1][0][RTW89_WW][7] = 58,
+ [0][0][1][0][RTW89_WW][8] = 58,
+ [0][0][1][0][RTW89_WW][9] = 58,
+ [0][0][1][0][RTW89_WW][10] = 58,
+ [0][0][1][0][RTW89_WW][11] = 54,
+ [0][0][1][0][RTW89_WW][12] = 50,
+ [0][0][1][0][RTW89_WW][13] = 0,
+ [0][1][1][0][RTW89_WW][0] = 46,
+ [0][1][1][0][RTW89_WW][1] = 46,
+ [0][1][1][0][RTW89_WW][2] = 46,
+ [0][1][1][0][RTW89_WW][3] = 46,
+ [0][1][1][0][RTW89_WW][4] = 46,
+ [0][1][1][0][RTW89_WW][5] = 46,
+ [0][1][1][0][RTW89_WW][6] = 46,
+ [0][1][1][0][RTW89_WW][7] = 46,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][9] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][11] = 46,
+ [0][1][1][0][RTW89_WW][12] = 42,
+ [0][1][1][0][RTW89_WW][13] = 0,
+ [0][0][2][0][RTW89_WW][0] = 58,
+ [0][0][2][0][RTW89_WW][1] = 58,
+ [0][0][2][0][RTW89_WW][2] = 58,
+ [0][0][2][0][RTW89_WW][3] = 58,
+ [0][0][2][0][RTW89_WW][4] = 58,
+ [0][0][2][0][RTW89_WW][5] = 58,
+ [0][0][2][0][RTW89_WW][6] = 58,
+ [0][0][2][0][RTW89_WW][7] = 58,
+ [0][0][2][0][RTW89_WW][8] = 58,
+ [0][0][2][0][RTW89_WW][9] = 58,
+ [0][0][2][0][RTW89_WW][10] = 58,
+ [0][0][2][0][RTW89_WW][11] = 54,
+ [0][0][2][0][RTW89_WW][12] = 50,
+ [0][0][2][0][RTW89_WW][13] = 0,
+ [0][1][2][0][RTW89_WW][0] = 46,
+ [0][1][2][0][RTW89_WW][1] = 46,
+ [0][1][2][0][RTW89_WW][2] = 46,
+ [0][1][2][0][RTW89_WW][3] = 46,
+ [0][1][2][0][RTW89_WW][4] = 46,
+ [0][1][2][0][RTW89_WW][5] = 46,
+ [0][1][2][0][RTW89_WW][6] = 46,
+ [0][1][2][0][RTW89_WW][7] = 46,
+ [0][1][2][0][RTW89_WW][8] = 46,
+ [0][1][2][0][RTW89_WW][9] = 46,
+ [0][1][2][0][RTW89_WW][10] = 46,
+ [0][1][2][0][RTW89_WW][11] = 46,
+ [0][1][2][0][RTW89_WW][12] = 42,
+ [0][1][2][0][RTW89_WW][13] = 0,
+ [0][1][2][1][RTW89_WW][0] = 34,
+ [0][1][2][1][RTW89_WW][1] = 34,
+ [0][1][2][1][RTW89_WW][2] = 34,
+ [0][1][2][1][RTW89_WW][3] = 34,
+ [0][1][2][1][RTW89_WW][4] = 34,
+ [0][1][2][1][RTW89_WW][5] = 34,
+ [0][1][2][1][RTW89_WW][6] = 34,
+ [0][1][2][1][RTW89_WW][7] = 34,
+ [0][1][2][1][RTW89_WW][8] = 34,
+ [0][1][2][1][RTW89_WW][9] = 34,
+ [0][1][2][1][RTW89_WW][10] = 34,
+ [0][1][2][1][RTW89_WW][11] = 34,
+ [0][1][2][1][RTW89_WW][12] = 34,
+ [0][1][2][1][RTW89_WW][13] = 0,
+ [1][0][2][0][RTW89_WW][0] = 0,
+ [1][0][2][0][RTW89_WW][1] = 0,
+ [1][0][2][0][RTW89_WW][2] = 58,
+ [1][0][2][0][RTW89_WW][3] = 58,
+ [1][0][2][0][RTW89_WW][4] = 58,
+ [1][0][2][0][RTW89_WW][5] = 58,
+ [1][0][2][0][RTW89_WW][6] = 58,
+ [1][0][2][0][RTW89_WW][7] = 58,
+ [1][0][2][0][RTW89_WW][8] = 58,
+ [1][0][2][0][RTW89_WW][9] = 58,
+ [1][0][2][0][RTW89_WW][10] = 58,
+ [1][0][2][0][RTW89_WW][11] = 0,
+ [1][0][2][0][RTW89_WW][12] = 0,
+ [1][0][2][0][RTW89_WW][13] = 0,
+ [1][1][2][0][RTW89_WW][0] = 0,
+ [1][1][2][0][RTW89_WW][1] = 0,
+ [1][1][2][0][RTW89_WW][2] = 46,
+ [1][1][2][0][RTW89_WW][3] = 46,
+ [1][1][2][0][RTW89_WW][4] = 46,
+ [1][1][2][0][RTW89_WW][5] = 46,
+ [1][1][2][0][RTW89_WW][6] = 46,
+ [1][1][2][0][RTW89_WW][7] = 46,
+ [1][1][2][0][RTW89_WW][8] = 46,
+ [1][1][2][0][RTW89_WW][9] = 42,
+ [1][1][2][0][RTW89_WW][10] = 38,
+ [1][1][2][0][RTW89_WW][11] = 0,
+ [1][1][2][0][RTW89_WW][12] = 0,
+ [1][1][2][0][RTW89_WW][13] = 0,
+ [1][1][2][1][RTW89_WW][0] = 0,
+ [1][1][2][1][RTW89_WW][1] = 0,
+ [1][1][2][1][RTW89_WW][2] = 34,
+ [1][1][2][1][RTW89_WW][3] = 34,
+ [1][1][2][1][RTW89_WW][4] = 34,
+ [1][1][2][1][RTW89_WW][5] = 34,
+ [1][1][2][1][RTW89_WW][6] = 34,
+ [1][1][2][1][RTW89_WW][7] = 34,
+ [1][1][2][1][RTW89_WW][8] = 34,
+ [1][1][2][1][RTW89_WW][9] = 34,
+ [1][1][2][1][RTW89_WW][10] = 34,
+ [1][1][2][1][RTW89_WW][11] = 0,
+ [1][1][2][1][RTW89_WW][12] = 0,
+ [1][1][2][1][RTW89_WW][13] = 0,
+ [0][0][0][0][RTW89_FCC][0] = 78,
+ [0][0][0][0][RTW89_ETSI][0] = 58,
+ [0][0][0][0][RTW89_MKK][0] = 68,
+ [0][0][0][0][RTW89_IC][0] = 78,
+ [0][0][0][0][RTW89_KCC][0] = 68,
+ [0][0][0][0][RTW89_ACMA][0] = 58,
+ [0][0][0][0][RTW89_CHILE][0] = 64,
+ [0][0][0][0][RTW89_UKRAINE][0] = 58,
+ [0][0][0][0][RTW89_MEXICO][0] = 78,
+ [0][0][0][0][RTW89_CN][0] = 58,
+ [0][0][0][0][RTW89_QATAR][0] = 58,
+ [0][0][0][0][RTW89_UK][0] = 58,
+ [0][0][0][0][RTW89_FCC][1] = 78,
+ [0][0][0][0][RTW89_ETSI][1] = 58,
+ [0][0][0][0][RTW89_MKK][1] = 68,
+ [0][0][0][0][RTW89_IC][1] = 78,
+ [0][0][0][0][RTW89_KCC][1] = 68,
+ [0][0][0][0][RTW89_ACMA][1] = 58,
+ [0][0][0][0][RTW89_CHILE][1] = 64,
+ [0][0][0][0][RTW89_UKRAINE][1] = 58,
+ [0][0][0][0][RTW89_MEXICO][1] = 78,
+ [0][0][0][0][RTW89_CN][1] = 58,
+ [0][0][0][0][RTW89_QATAR][1] = 58,
+ [0][0][0][0][RTW89_UK][1] = 58,
+ [0][0][0][0][RTW89_FCC][2] = 78,
+ [0][0][0][0][RTW89_ETSI][2] = 58,
+ [0][0][0][0][RTW89_MKK][2] = 68,
+ [0][0][0][0][RTW89_IC][2] = 78,
+ [0][0][0][0][RTW89_KCC][2] = 68,
+ [0][0][0][0][RTW89_ACMA][2] = 58,
+ [0][0][0][0][RTW89_CHILE][2] = 64,
+ [0][0][0][0][RTW89_UKRAINE][2] = 58,
+ [0][0][0][0][RTW89_MEXICO][2] = 78,
+ [0][0][0][0][RTW89_CN][2] = 58,
+ [0][0][0][0][RTW89_QATAR][2] = 58,
+ [0][0][0][0][RTW89_UK][2] = 58,
+ [0][0][0][0][RTW89_FCC][3] = 78,
+ [0][0][0][0][RTW89_ETSI][3] = 58,
+ [0][0][0][0][RTW89_MKK][3] = 68,
+ [0][0][0][0][RTW89_IC][3] = 78,
+ [0][0][0][0][RTW89_KCC][3] = 68,
+ [0][0][0][0][RTW89_ACMA][3] = 58,
+ [0][0][0][0][RTW89_CHILE][3] = 64,
+ [0][0][0][0][RTW89_UKRAINE][3] = 58,
+ [0][0][0][0][RTW89_MEXICO][3] = 78,
+ [0][0][0][0][RTW89_CN][3] = 58,
+ [0][0][0][0][RTW89_QATAR][3] = 58,
+ [0][0][0][0][RTW89_UK][3] = 58,
+ [0][0][0][0][RTW89_FCC][4] = 78,
+ [0][0][0][0][RTW89_ETSI][4] = 58,
+ [0][0][0][0][RTW89_MKK][4] = 68,
+ [0][0][0][0][RTW89_IC][4] = 78,
+ [0][0][0][0][RTW89_KCC][4] = 70,
+ [0][0][0][0][RTW89_ACMA][4] = 58,
+ [0][0][0][0][RTW89_CHILE][4] = 64,
+ [0][0][0][0][RTW89_UKRAINE][4] = 58,
+ [0][0][0][0][RTW89_MEXICO][4] = 78,
+ [0][0][0][0][RTW89_CN][4] = 58,
+ [0][0][0][0][RTW89_QATAR][4] = 58,
+ [0][0][0][0][RTW89_UK][4] = 58,
+ [0][0][0][0][RTW89_FCC][5] = 78,
+ [0][0][0][0][RTW89_ETSI][5] = 58,
+ [0][0][0][0][RTW89_MKK][5] = 68,
+ [0][0][0][0][RTW89_IC][5] = 78,
+ [0][0][0][0][RTW89_KCC][5] = 70,
+ [0][0][0][0][RTW89_ACMA][5] = 58,
+ [0][0][0][0][RTW89_CHILE][5] = 64,
+ [0][0][0][0][RTW89_UKRAINE][5] = 58,
+ [0][0][0][0][RTW89_MEXICO][5] = 78,
+ [0][0][0][0][RTW89_CN][5] = 58,
+ [0][0][0][0][RTW89_QATAR][5] = 58,
+ [0][0][0][0][RTW89_UK][5] = 58,
+ [0][0][0][0][RTW89_FCC][6] = 78,
+ [0][0][0][0][RTW89_ETSI][6] = 58,
+ [0][0][0][0][RTW89_MKK][6] = 68,
+ [0][0][0][0][RTW89_IC][6] = 78,
+ [0][0][0][0][RTW89_KCC][6] = 70,
+ [0][0][0][0][RTW89_ACMA][6] = 58,
+ [0][0][0][0][RTW89_CHILE][6] = 64,
+ [0][0][0][0][RTW89_UKRAINE][6] = 58,
+ [0][0][0][0][RTW89_MEXICO][6] = 78,
+ [0][0][0][0][RTW89_CN][6] = 58,
+ [0][0][0][0][RTW89_QATAR][6] = 58,
+ [0][0][0][0][RTW89_UK][6] = 58,
+ [0][0][0][0][RTW89_FCC][7] = 78,
+ [0][0][0][0][RTW89_ETSI][7] = 58,
+ [0][0][0][0][RTW89_MKK][7] = 68,
+ [0][0][0][0][RTW89_IC][7] = 78,
+ [0][0][0][0][RTW89_KCC][7] = 70,
+ [0][0][0][0][RTW89_ACMA][7] = 58,
+ [0][0][0][0][RTW89_CHILE][7] = 64,
+ [0][0][0][0][RTW89_UKRAINE][7] = 58,
+ [0][0][0][0][RTW89_MEXICO][7] = 78,
+ [0][0][0][0][RTW89_CN][7] = 58,
+ [0][0][0][0][RTW89_QATAR][7] = 58,
+ [0][0][0][0][RTW89_UK][7] = 58,
+ [0][0][0][0][RTW89_FCC][8] = 78,
+ [0][0][0][0][RTW89_ETSI][8] = 58,
+ [0][0][0][0][RTW89_MKK][8] = 68,
+ [0][0][0][0][RTW89_IC][8] = 78,
+ [0][0][0][0][RTW89_KCC][8] = 70,
+ [0][0][0][0][RTW89_ACMA][8] = 58,
+ [0][0][0][0][RTW89_CHILE][8] = 64,
+ [0][0][0][0][RTW89_UKRAINE][8] = 58,
+ [0][0][0][0][RTW89_MEXICO][8] = 78,
+ [0][0][0][0][RTW89_CN][8] = 58,
+ [0][0][0][0][RTW89_QATAR][8] = 58,
+ [0][0][0][0][RTW89_UK][8] = 58,
+ [0][0][0][0][RTW89_FCC][9] = 78,
+ [0][0][0][0][RTW89_ETSI][9] = 58,
+ [0][0][0][0][RTW89_MKK][9] = 68,
+ [0][0][0][0][RTW89_IC][9] = 78,
+ [0][0][0][0][RTW89_KCC][9] = 70,
+ [0][0][0][0][RTW89_ACMA][9] = 58,
+ [0][0][0][0][RTW89_CHILE][9] = 64,
+ [0][0][0][0][RTW89_UKRAINE][9] = 58,
+ [0][0][0][0][RTW89_MEXICO][9] = 78,
+ [0][0][0][0][RTW89_CN][9] = 58,
+ [0][0][0][0][RTW89_QATAR][9] = 58,
+ [0][0][0][0][RTW89_UK][9] = 58,
+ [0][0][0][0][RTW89_FCC][10] = 78,
+ [0][0][0][0][RTW89_ETSI][10] = 58,
+ [0][0][0][0][RTW89_MKK][10] = 68,
+ [0][0][0][0][RTW89_IC][10] = 78,
+ [0][0][0][0][RTW89_KCC][10] = 70,
+ [0][0][0][0][RTW89_ACMA][10] = 58,
+ [0][0][0][0][RTW89_CHILE][10] = 66,
+ [0][0][0][0][RTW89_UKRAINE][10] = 58,
+ [0][0][0][0][RTW89_MEXICO][10] = 78,
+ [0][0][0][0][RTW89_CN][10] = 58,
+ [0][0][0][0][RTW89_QATAR][10] = 58,
+ [0][0][0][0][RTW89_UK][10] = 58,
+ [0][0][0][0][RTW89_FCC][11] = 70,
+ [0][0][0][0][RTW89_ETSI][11] = 58,
+ [0][0][0][0][RTW89_MKK][11] = 68,
+ [0][0][0][0][RTW89_IC][11] = 70,
+ [0][0][0][0][RTW89_KCC][11] = 70,
+ [0][0][0][0][RTW89_ACMA][11] = 58,
+ [0][0][0][0][RTW89_CHILE][11] = 64,
+ [0][0][0][0][RTW89_UKRAINE][11] = 58,
+ [0][0][0][0][RTW89_MEXICO][11] = 70,
+ [0][0][0][0][RTW89_CN][11] = 58,
+ [0][0][0][0][RTW89_QATAR][11] = 58,
+ [0][0][0][0][RTW89_UK][11] = 58,
+ [0][0][0][0][RTW89_FCC][12] = 56,
+ [0][0][0][0][RTW89_ETSI][12] = 58,
+ [0][0][0][0][RTW89_MKK][12] = 68,
+ [0][0][0][0][RTW89_IC][12] = 56,
+ [0][0][0][0][RTW89_KCC][12] = 70,
+ [0][0][0][0][RTW89_ACMA][12] = 58,
+ [0][0][0][0][RTW89_CHILE][12] = 56,
+ [0][0][0][0][RTW89_UKRAINE][12] = 58,
+ [0][0][0][0][RTW89_MEXICO][12] = 56,
+ [0][0][0][0][RTW89_CN][12] = 58,
+ [0][0][0][0][RTW89_QATAR][12] = 58,
+ [0][0][0][0][RTW89_UK][12] = 58,
+ [0][0][0][0][RTW89_FCC][13] = 127,
+ [0][0][0][0][RTW89_ETSI][13] = 127,
+ [0][0][0][0][RTW89_MKK][13] = 76,
+ [0][0][0][0][RTW89_IC][13] = 127,
+ [0][0][0][0][RTW89_KCC][13] = 127,
+ [0][0][0][0][RTW89_ACMA][13] = 127,
+ [0][0][0][0][RTW89_CHILE][13] = 127,
+ [0][0][0][0][RTW89_UKRAINE][13] = 127,
+ [0][0][0][0][RTW89_MEXICO][13] = 127,
+ [0][0][0][0][RTW89_CN][13] = 127,
+ [0][0][0][0][RTW89_QATAR][13] = 127,
+ [0][0][0][0][RTW89_UK][13] = 127,
+ [0][1][0][0][RTW89_FCC][0] = 74,
+ [0][1][0][0][RTW89_ETSI][0] = 46,
+ [0][1][0][0][RTW89_MKK][0] = 56,
+ [0][1][0][0][RTW89_IC][0] = 74,
+ [0][1][0][0][RTW89_KCC][0] = 58,
+ [0][1][0][0][RTW89_ACMA][0] = 46,
+ [0][1][0][0][RTW89_CHILE][0] = 50,
+ [0][1][0][0][RTW89_UKRAINE][0] = 46,
+ [0][1][0][0][RTW89_MEXICO][0] = 74,
+ [0][1][0][0][RTW89_CN][0] = 46,
+ [0][1][0][0][RTW89_QATAR][0] = 46,
+ [0][1][0][0][RTW89_UK][0] = 46,
+ [0][1][0][0][RTW89_FCC][1] = 74,
+ [0][1][0][0][RTW89_ETSI][1] = 46,
+ [0][1][0][0][RTW89_MKK][1] = 56,
+ [0][1][0][0][RTW89_IC][1] = 74,
+ [0][1][0][0][RTW89_KCC][1] = 58,
+ [0][1][0][0][RTW89_ACMA][1] = 46,
+ [0][1][0][0][RTW89_CHILE][1] = 50,
+ [0][1][0][0][RTW89_UKRAINE][1] = 46,
+ [0][1][0][0][RTW89_MEXICO][1] = 74,
+ [0][1][0][0][RTW89_CN][1] = 46,
+ [0][1][0][0][RTW89_QATAR][1] = 46,
+ [0][1][0][0][RTW89_UK][1] = 46,
+ [0][1][0][0][RTW89_FCC][2] = 74,
+ [0][1][0][0][RTW89_ETSI][2] = 46,
+ [0][1][0][0][RTW89_MKK][2] = 56,
+ [0][1][0][0][RTW89_IC][2] = 74,
+ [0][1][0][0][RTW89_KCC][2] = 58,
+ [0][1][0][0][RTW89_ACMA][2] = 46,
+ [0][1][0][0][RTW89_CHILE][2] = 50,
+ [0][1][0][0][RTW89_UKRAINE][2] = 46,
+ [0][1][0][0][RTW89_MEXICO][2] = 74,
+ [0][1][0][0][RTW89_CN][2] = 46,
+ [0][1][0][0][RTW89_QATAR][2] = 46,
+ [0][1][0][0][RTW89_UK][2] = 46,
+ [0][1][0][0][RTW89_FCC][3] = 74,
+ [0][1][0][0][RTW89_ETSI][3] = 46,
+ [0][1][0][0][RTW89_MKK][3] = 56,
+ [0][1][0][0][RTW89_IC][3] = 74,
+ [0][1][0][0][RTW89_KCC][3] = 58,
+ [0][1][0][0][RTW89_ACMA][3] = 46,
+ [0][1][0][0][RTW89_CHILE][3] = 50,
+ [0][1][0][0][RTW89_UKRAINE][3] = 46,
+ [0][1][0][0][RTW89_MEXICO][3] = 74,
+ [0][1][0][0][RTW89_CN][3] = 46,
+ [0][1][0][0][RTW89_QATAR][3] = 46,
+ [0][1][0][0][RTW89_UK][3] = 46,
+ [0][1][0][0][RTW89_FCC][4] = 74,
+ [0][1][0][0][RTW89_ETSI][4] = 46,
+ [0][1][0][0][RTW89_MKK][4] = 56,
+ [0][1][0][0][RTW89_IC][4] = 74,
+ [0][1][0][0][RTW89_KCC][4] = 56,
+ [0][1][0][0][RTW89_ACMA][4] = 46,
+ [0][1][0][0][RTW89_CHILE][4] = 50,
+ [0][1][0][0][RTW89_UKRAINE][4] = 46,
+ [0][1][0][0][RTW89_MEXICO][4] = 74,
+ [0][1][0][0][RTW89_CN][4] = 46,
+ [0][1][0][0][RTW89_QATAR][4] = 46,
+ [0][1][0][0][RTW89_UK][4] = 46,
+ [0][1][0][0][RTW89_FCC][5] = 74,
+ [0][1][0][0][RTW89_ETSI][5] = 46,
+ [0][1][0][0][RTW89_MKK][5] = 56,
+ [0][1][0][0][RTW89_IC][5] = 74,
+ [0][1][0][0][RTW89_KCC][5] = 56,
+ [0][1][0][0][RTW89_ACMA][5] = 46,
+ [0][1][0][0][RTW89_CHILE][5] = 50,
+ [0][1][0][0][RTW89_UKRAINE][5] = 46,
+ [0][1][0][0][RTW89_MEXICO][5] = 74,
+ [0][1][0][0][RTW89_CN][5] = 46,
+ [0][1][0][0][RTW89_QATAR][5] = 46,
+ [0][1][0][0][RTW89_UK][5] = 46,
+ [0][1][0][0][RTW89_FCC][6] = 74,
+ [0][1][0][0][RTW89_ETSI][6] = 46,
+ [0][1][0][0][RTW89_MKK][6] = 56,
+ [0][1][0][0][RTW89_IC][6] = 74,
+ [0][1][0][0][RTW89_KCC][6] = 56,
+ [0][1][0][0][RTW89_ACMA][6] = 46,
+ [0][1][0][0][RTW89_CHILE][6] = 52,
+ [0][1][0][0][RTW89_UKRAINE][6] = 46,
+ [0][1][0][0][RTW89_MEXICO][6] = 74,
+ [0][1][0][0][RTW89_CN][6] = 46,
+ [0][1][0][0][RTW89_QATAR][6] = 46,
+ [0][1][0][0][RTW89_UK][6] = 46,
+ [0][1][0][0][RTW89_FCC][7] = 74,
+ [0][1][0][0][RTW89_ETSI][7] = 46,
+ [0][1][0][0][RTW89_MKK][7] = 56,
+ [0][1][0][0][RTW89_IC][7] = 74,
+ [0][1][0][0][RTW89_KCC][7] = 56,
+ [0][1][0][0][RTW89_ACMA][7] = 46,
+ [0][1][0][0][RTW89_CHILE][7] = 50,
+ [0][1][0][0][RTW89_UKRAINE][7] = 46,
+ [0][1][0][0][RTW89_MEXICO][7] = 74,
+ [0][1][0][0][RTW89_CN][7] = 46,
+ [0][1][0][0][RTW89_QATAR][7] = 46,
+ [0][1][0][0][RTW89_UK][7] = 46,
+ [0][1][0][0][RTW89_FCC][8] = 74,
+ [0][1][0][0][RTW89_ETSI][8] = 46,
+ [0][1][0][0][RTW89_MKK][8] = 56,
+ [0][1][0][0][RTW89_IC][8] = 74,
+ [0][1][0][0][RTW89_KCC][8] = 56,
+ [0][1][0][0][RTW89_ACMA][8] = 46,
+ [0][1][0][0][RTW89_CHILE][8] = 50,
+ [0][1][0][0][RTW89_UKRAINE][8] = 46,
+ [0][1][0][0][RTW89_MEXICO][8] = 74,
+ [0][1][0][0][RTW89_CN][8] = 46,
+ [0][1][0][0][RTW89_QATAR][8] = 46,
+ [0][1][0][0][RTW89_UK][8] = 46,
+ [0][1][0][0][RTW89_FCC][9] = 74,
+ [0][1][0][0][RTW89_ETSI][9] = 46,
+ [0][1][0][0][RTW89_MKK][9] = 56,
+ [0][1][0][0][RTW89_IC][9] = 74,
+ [0][1][0][0][RTW89_KCC][9] = 54,
+ [0][1][0][0][RTW89_ACMA][9] = 46,
+ [0][1][0][0][RTW89_CHILE][9] = 50,
+ [0][1][0][0][RTW89_UKRAINE][9] = 46,
+ [0][1][0][0][RTW89_MEXICO][9] = 74,
+ [0][1][0][0][RTW89_CN][9] = 46,
+ [0][1][0][0][RTW89_QATAR][9] = 46,
+ [0][1][0][0][RTW89_UK][9] = 46,
+ [0][1][0][0][RTW89_FCC][10] = 74,
+ [0][1][0][0][RTW89_ETSI][10] = 46,
+ [0][1][0][0][RTW89_MKK][10] = 56,
+ [0][1][0][0][RTW89_IC][10] = 74,
+ [0][1][0][0][RTW89_KCC][10] = 54,
+ [0][1][0][0][RTW89_ACMA][10] = 46,
+ [0][1][0][0][RTW89_CHILE][10] = 52,
+ [0][1][0][0][RTW89_UKRAINE][10] = 46,
+ [0][1][0][0][RTW89_MEXICO][10] = 74,
+ [0][1][0][0][RTW89_CN][10] = 46,
+ [0][1][0][0][RTW89_QATAR][10] = 46,
+ [0][1][0][0][RTW89_UK][10] = 46,
+ [0][1][0][0][RTW89_FCC][11] = 54,
+ [0][1][0][0][RTW89_ETSI][11] = 46,
+ [0][1][0][0][RTW89_MKK][11] = 56,
+ [0][1][0][0][RTW89_IC][11] = 54,
+ [0][1][0][0][RTW89_KCC][11] = 54,
+ [0][1][0][0][RTW89_ACMA][11] = 46,
+ [0][1][0][0][RTW89_CHILE][11] = 50,
+ [0][1][0][0][RTW89_UKRAINE][11] = 46,
+ [0][1][0][0][RTW89_MEXICO][11] = 54,
+ [0][1][0][0][RTW89_CN][11] = 46,
+ [0][1][0][0][RTW89_QATAR][11] = 46,
+ [0][1][0][0][RTW89_UK][11] = 46,
+ [0][1][0][0][RTW89_FCC][12] = 42,
+ [0][1][0][0][RTW89_ETSI][12] = 46,
+ [0][1][0][0][RTW89_MKK][12] = 56,
+ [0][1][0][0][RTW89_IC][12] = 42,
+ [0][1][0][0][RTW89_KCC][12] = 54,
+ [0][1][0][0][RTW89_ACMA][12] = 46,
+ [0][1][0][0][RTW89_CHILE][12] = 42,
+ [0][1][0][0][RTW89_UKRAINE][12] = 46,
+ [0][1][0][0][RTW89_MEXICO][12] = 42,
+ [0][1][0][0][RTW89_CN][12] = 46,
+ [0][1][0][0][RTW89_QATAR][12] = 46,
+ [0][1][0][0][RTW89_UK][12] = 46,
+ [0][1][0][0][RTW89_FCC][13] = 127,
+ [0][1][0][0][RTW89_ETSI][13] = 127,
+ [0][1][0][0][RTW89_MKK][13] = 64,
+ [0][1][0][0][RTW89_IC][13] = 127,
+ [0][1][0][0][RTW89_KCC][13] = 127,
+ [0][1][0][0][RTW89_ACMA][13] = 127,
+ [0][1][0][0][RTW89_CHILE][13] = 127,
+ [0][1][0][0][RTW89_UKRAINE][13] = 127,
+ [0][1][0][0][RTW89_MEXICO][13] = 127,
+ [0][1][0][0][RTW89_CN][13] = 127,
+ [0][1][0][0][RTW89_QATAR][13] = 127,
+ [0][1][0][0][RTW89_UK][13] = 127,
+ [1][0][0][0][RTW89_FCC][0] = 127,
+ [1][0][0][0][RTW89_ETSI][0] = 127,
+ [1][0][0][0][RTW89_MKK][0] = 127,
+ [1][0][0][0][RTW89_IC][0] = 127,
+ [1][0][0][0][RTW89_KCC][0] = 127,
+ [1][0][0][0][RTW89_ACMA][0] = 127,
+ [1][0][0][0][RTW89_CHILE][0] = 127,
+ [1][0][0][0][RTW89_UKRAINE][0] = 127,
+ [1][0][0][0][RTW89_MEXICO][0] = 127,
+ [1][0][0][0][RTW89_CN][0] = 127,
+ [1][0][0][0][RTW89_QATAR][0] = 127,
+ [1][0][0][0][RTW89_UK][0] = 127,
+ [1][0][0][0][RTW89_FCC][1] = 127,
+ [1][0][0][0][RTW89_ETSI][1] = 127,
+ [1][0][0][0][RTW89_MKK][1] = 127,
+ [1][0][0][0][RTW89_IC][1] = 127,
+ [1][0][0][0][RTW89_KCC][1] = 127,
+ [1][0][0][0][RTW89_ACMA][1] = 127,
+ [1][0][0][0][RTW89_CHILE][1] = 127,
+ [1][0][0][0][RTW89_UKRAINE][1] = 127,
+ [1][0][0][0][RTW89_MEXICO][1] = 127,
+ [1][0][0][0][RTW89_CN][1] = 127,
+ [1][0][0][0][RTW89_QATAR][1] = 127,
+ [1][0][0][0][RTW89_UK][1] = 127,
+ [1][0][0][0][RTW89_FCC][2] = 50,
+ [1][0][0][0][RTW89_ETSI][2] = 58,
+ [1][0][0][0][RTW89_MKK][2] = 76,
+ [1][0][0][0][RTW89_IC][2] = 50,
+ [1][0][0][0][RTW89_KCC][2] = 70,
+ [1][0][0][0][RTW89_ACMA][2] = 58,
+ [1][0][0][0][RTW89_CHILE][2] = 62,
+ [1][0][0][0][RTW89_UKRAINE][2] = 58,
+ [1][0][0][0][RTW89_MEXICO][2] = 50,
+ [1][0][0][0][RTW89_CN][2] = 58,
+ [1][0][0][0][RTW89_QATAR][2] = 58,
+ [1][0][0][0][RTW89_UK][2] = 58,
+ [1][0][0][0][RTW89_FCC][3] = 50,
+ [1][0][0][0][RTW89_ETSI][3] = 58,
+ [1][0][0][0][RTW89_MKK][3] = 76,
+ [1][0][0][0][RTW89_IC][3] = 50,
+ [1][0][0][0][RTW89_KCC][3] = 70,
+ [1][0][0][0][RTW89_ACMA][3] = 58,
+ [1][0][0][0][RTW89_CHILE][3] = 62,
+ [1][0][0][0][RTW89_UKRAINE][3] = 58,
+ [1][0][0][0][RTW89_MEXICO][3] = 50,
+ [1][0][0][0][RTW89_CN][3] = 58,
+ [1][0][0][0][RTW89_QATAR][3] = 58,
+ [1][0][0][0][RTW89_UK][3] = 58,
+ [1][0][0][0][RTW89_FCC][4] = 50,
+ [1][0][0][0][RTW89_ETSI][4] = 58,
+ [1][0][0][0][RTW89_MKK][4] = 76,
+ [1][0][0][0][RTW89_IC][4] = 50,
+ [1][0][0][0][RTW89_KCC][4] = 70,
+ [1][0][0][0][RTW89_ACMA][4] = 58,
+ [1][0][0][0][RTW89_CHILE][4] = 62,
+ [1][0][0][0][RTW89_UKRAINE][4] = 58,
+ [1][0][0][0][RTW89_MEXICO][4] = 50,
+ [1][0][0][0][RTW89_CN][4] = 58,
+ [1][0][0][0][RTW89_QATAR][4] = 58,
+ [1][0][0][0][RTW89_UK][4] = 58,
+ [1][0][0][0][RTW89_FCC][5] = 66,
+ [1][0][0][0][RTW89_ETSI][5] = 58,
+ [1][0][0][0][RTW89_MKK][5] = 76,
+ [1][0][0][0][RTW89_IC][5] = 66,
+ [1][0][0][0][RTW89_KCC][5] = 70,
+ [1][0][0][0][RTW89_ACMA][5] = 58,
+ [1][0][0][0][RTW89_CHILE][5] = 62,
+ [1][0][0][0][RTW89_UKRAINE][5] = 58,
+ [1][0][0][0][RTW89_MEXICO][5] = 66,
+ [1][0][0][0][RTW89_CN][5] = 58,
+ [1][0][0][0][RTW89_QATAR][5] = 58,
+ [1][0][0][0][RTW89_UK][5] = 58,
+ [1][0][0][0][RTW89_FCC][6] = 50,
+ [1][0][0][0][RTW89_ETSI][6] = 58,
+ [1][0][0][0][RTW89_MKK][6] = 76,
+ [1][0][0][0][RTW89_IC][6] = 50,
+ [1][0][0][0][RTW89_KCC][6] = 70,
+ [1][0][0][0][RTW89_ACMA][6] = 58,
+ [1][0][0][0][RTW89_CHILE][6] = 62,
+ [1][0][0][0][RTW89_UKRAINE][6] = 58,
+ [1][0][0][0][RTW89_MEXICO][6] = 50,
+ [1][0][0][0][RTW89_CN][6] = 58,
+ [1][0][0][0][RTW89_QATAR][6] = 58,
+ [1][0][0][0][RTW89_UK][6] = 58,
+ [1][0][0][0][RTW89_FCC][7] = 50,
+ [1][0][0][0][RTW89_ETSI][7] = 58,
+ [1][0][0][0][RTW89_MKK][7] = 76,
+ [1][0][0][0][RTW89_IC][7] = 50,
+ [1][0][0][0][RTW89_KCC][7] = 70,
+ [1][0][0][0][RTW89_ACMA][7] = 58,
+ [1][0][0][0][RTW89_CHILE][7] = 62,
+ [1][0][0][0][RTW89_UKRAINE][7] = 58,
+ [1][0][0][0][RTW89_MEXICO][7] = 50,
+ [1][0][0][0][RTW89_CN][7] = 58,
+ [1][0][0][0][RTW89_QATAR][7] = 58,
+ [1][0][0][0][RTW89_UK][7] = 58,
+ [1][0][0][0][RTW89_FCC][8] = 50,
+ [1][0][0][0][RTW89_ETSI][8] = 58,
+ [1][0][0][0][RTW89_MKK][8] = 76,
+ [1][0][0][0][RTW89_IC][8] = 50,
+ [1][0][0][0][RTW89_KCC][8] = 70,
+ [1][0][0][0][RTW89_ACMA][8] = 58,
+ [1][0][0][0][RTW89_CHILE][8] = 62,
+ [1][0][0][0][RTW89_UKRAINE][8] = 58,
+ [1][0][0][0][RTW89_MEXICO][8] = 50,
+ [1][0][0][0][RTW89_CN][8] = 58,
+ [1][0][0][0][RTW89_QATAR][8] = 58,
+ [1][0][0][0][RTW89_UK][8] = 58,
+ [1][0][0][0][RTW89_FCC][9] = 42,
+ [1][0][0][0][RTW89_ETSI][9] = 58,
+ [1][0][0][0][RTW89_MKK][9] = 76,
+ [1][0][0][0][RTW89_IC][9] = 42,
+ [1][0][0][0][RTW89_KCC][9] = 70,
+ [1][0][0][0][RTW89_ACMA][9] = 58,
+ [1][0][0][0][RTW89_CHILE][9] = 42,
+ [1][0][0][0][RTW89_UKRAINE][9] = 58,
+ [1][0][0][0][RTW89_MEXICO][9] = 42,
+ [1][0][0][0][RTW89_CN][9] = 58,
+ [1][0][0][0][RTW89_QATAR][9] = 58,
+ [1][0][0][0][RTW89_UK][9] = 58,
+ [1][0][0][0][RTW89_FCC][10] = 30,
+ [1][0][0][0][RTW89_ETSI][10] = 58,
+ [1][0][0][0][RTW89_MKK][10] = 72,
+ [1][0][0][0][RTW89_IC][10] = 30,
+ [1][0][0][0][RTW89_KCC][10] = 70,
+ [1][0][0][0][RTW89_ACMA][10] = 58,
+ [1][0][0][0][RTW89_CHILE][10] = 30,
+ [1][0][0][0][RTW89_UKRAINE][10] = 58,
+ [1][0][0][0][RTW89_MEXICO][10] = 30,
+ [1][0][0][0][RTW89_CN][10] = 58,
+ [1][0][0][0][RTW89_QATAR][10] = 58,
+ [1][0][0][0][RTW89_UK][10] = 58,
+ [1][0][0][0][RTW89_FCC][11] = 127,
+ [1][0][0][0][RTW89_ETSI][11] = 127,
+ [1][0][0][0][RTW89_MKK][11] = 127,
+ [1][0][0][0][RTW89_IC][11] = 127,
+ [1][0][0][0][RTW89_KCC][11] = 127,
+ [1][0][0][0][RTW89_ACMA][11] = 127,
+ [1][0][0][0][RTW89_CHILE][11] = 127,
+ [1][0][0][0][RTW89_UKRAINE][11] = 127,
+ [1][0][0][0][RTW89_MEXICO][11] = 127,
+ [1][0][0][0][RTW89_CN][11] = 127,
+ [1][0][0][0][RTW89_QATAR][11] = 127,
+ [1][0][0][0][RTW89_UK][11] = 127,
+ [1][0][0][0][RTW89_FCC][12] = 127,
+ [1][0][0][0][RTW89_ETSI][12] = 127,
+ [1][0][0][0][RTW89_MKK][12] = 127,
+ [1][0][0][0][RTW89_IC][12] = 127,
+ [1][0][0][0][RTW89_KCC][12] = 127,
+ [1][0][0][0][RTW89_ACMA][12] = 127,
+ [1][0][0][0][RTW89_CHILE][12] = 127,
+ [1][0][0][0][RTW89_UKRAINE][12] = 127,
+ [1][0][0][0][RTW89_MEXICO][12] = 127,
+ [1][0][0][0][RTW89_CN][12] = 127,
+ [1][0][0][0][RTW89_QATAR][12] = 127,
+ [1][0][0][0][RTW89_UK][12] = 127,
+ [1][0][0][0][RTW89_FCC][13] = 127,
+ [1][0][0][0][RTW89_ETSI][13] = 127,
+ [1][0][0][0][RTW89_MKK][13] = 127,
+ [1][0][0][0][RTW89_IC][13] = 127,
+ [1][0][0][0][RTW89_KCC][13] = 127,
+ [1][0][0][0][RTW89_ACMA][13] = 127,
+ [1][0][0][0][RTW89_CHILE][13] = 127,
+ [1][0][0][0][RTW89_UKRAINE][13] = 127,
+ [1][0][0][0][RTW89_MEXICO][13] = 127,
+ [1][0][0][0][RTW89_CN][13] = 127,
+ [1][0][0][0][RTW89_QATAR][13] = 127,
+ [1][0][0][0][RTW89_UK][13] = 127,
+ [1][1][0][0][RTW89_FCC][0] = 127,
+ [1][1][0][0][RTW89_ETSI][0] = 127,
+ [1][1][0][0][RTW89_MKK][0] = 127,
+ [1][1][0][0][RTW89_IC][0] = 127,
+ [1][1][0][0][RTW89_KCC][0] = 127,
+ [1][1][0][0][RTW89_ACMA][0] = 127,
+ [1][1][0][0][RTW89_CHILE][0] = 127,
+ [1][1][0][0][RTW89_UKRAINE][0] = 127,
+ [1][1][0][0][RTW89_MEXICO][0] = 127,
+ [1][1][0][0][RTW89_CN][0] = 127,
+ [1][1][0][0][RTW89_QATAR][0] = 127,
+ [1][1][0][0][RTW89_UK][0] = 127,
+ [1][1][0][0][RTW89_FCC][1] = 127,
+ [1][1][0][0][RTW89_ETSI][1] = 127,
+ [1][1][0][0][RTW89_MKK][1] = 127,
+ [1][1][0][0][RTW89_IC][1] = 127,
+ [1][1][0][0][RTW89_KCC][1] = 127,
+ [1][1][0][0][RTW89_ACMA][1] = 127,
+ [1][1][0][0][RTW89_CHILE][1] = 127,
+ [1][1][0][0][RTW89_UKRAINE][1] = 127,
+ [1][1][0][0][RTW89_MEXICO][1] = 127,
+ [1][1][0][0][RTW89_CN][1] = 127,
+ [1][1][0][0][RTW89_QATAR][1] = 127,
+ [1][1][0][0][RTW89_UK][1] = 127,
+ [1][1][0][0][RTW89_FCC][2] = 46,
+ [1][1][0][0][RTW89_ETSI][2] = 46,
+ [1][1][0][0][RTW89_MKK][2] = 64,
+ [1][1][0][0][RTW89_IC][2] = 46,
+ [1][1][0][0][RTW89_KCC][2] = 58,
+ [1][1][0][0][RTW89_ACMA][2] = 46,
+ [1][1][0][0][RTW89_CHILE][2] = 50,
+ [1][1][0][0][RTW89_UKRAINE][2] = 46,
+ [1][1][0][0][RTW89_MEXICO][2] = 46,
+ [1][1][0][0][RTW89_CN][2] = 46,
+ [1][1][0][0][RTW89_QATAR][2] = 46,
+ [1][1][0][0][RTW89_UK][2] = 46,
+ [1][1][0][0][RTW89_FCC][3] = 46,
+ [1][1][0][0][RTW89_ETSI][3] = 46,
+ [1][1][0][0][RTW89_MKK][3] = 64,
+ [1][1][0][0][RTW89_IC][3] = 46,
+ [1][1][0][0][RTW89_KCC][3] = 58,
+ [1][1][0][0][RTW89_ACMA][3] = 46,
+ [1][1][0][0][RTW89_CHILE][3] = 50,
+ [1][1][0][0][RTW89_UKRAINE][3] = 46,
+ [1][1][0][0][RTW89_MEXICO][3] = 46,
+ [1][1][0][0][RTW89_CN][3] = 46,
+ [1][1][0][0][RTW89_QATAR][3] = 46,
+ [1][1][0][0][RTW89_UK][3] = 46,
+ [1][1][0][0][RTW89_FCC][4] = 46,
+ [1][1][0][0][RTW89_ETSI][4] = 46,
+ [1][1][0][0][RTW89_MKK][4] = 64,
+ [1][1][0][0][RTW89_IC][4] = 46,
+ [1][1][0][0][RTW89_KCC][4] = 58,
+ [1][1][0][0][RTW89_ACMA][4] = 46,
+ [1][1][0][0][RTW89_CHILE][4] = 50,
+ [1][1][0][0][RTW89_UKRAINE][4] = 46,
+ [1][1][0][0][RTW89_MEXICO][4] = 46,
+ [1][1][0][0][RTW89_CN][4] = 46,
+ [1][1][0][0][RTW89_QATAR][4] = 46,
+ [1][1][0][0][RTW89_UK][4] = 46,
+ [1][1][0][0][RTW89_FCC][5] = 62,
+ [1][1][0][0][RTW89_ETSI][5] = 46,
+ [1][1][0][0][RTW89_MKK][5] = 64,
+ [1][1][0][0][RTW89_IC][5] = 62,
+ [1][1][0][0][RTW89_KCC][5] = 58,
+ [1][1][0][0][RTW89_ACMA][5] = 46,
+ [1][1][0][0][RTW89_CHILE][5] = 50,
+ [1][1][0][0][RTW89_UKRAINE][5] = 46,
+ [1][1][0][0][RTW89_MEXICO][5] = 62,
+ [1][1][0][0][RTW89_CN][5] = 46,
+ [1][1][0][0][RTW89_QATAR][5] = 46,
+ [1][1][0][0][RTW89_UK][5] = 46,
+ [1][1][0][0][RTW89_FCC][6] = 34,
+ [1][1][0][0][RTW89_ETSI][6] = 46,
+ [1][1][0][0][RTW89_MKK][6] = 64,
+ [1][1][0][0][RTW89_IC][6] = 34,
+ [1][1][0][0][RTW89_KCC][6] = 58,
+ [1][1][0][0][RTW89_ACMA][6] = 46,
+ [1][1][0][0][RTW89_CHILE][6] = 50,
+ [1][1][0][0][RTW89_UKRAINE][6] = 46,
+ [1][1][0][0][RTW89_MEXICO][6] = 34,
+ [1][1][0][0][RTW89_CN][6] = 46,
+ [1][1][0][0][RTW89_QATAR][6] = 46,
+ [1][1][0][0][RTW89_UK][6] = 46,
+ [1][1][0][0][RTW89_FCC][7] = 34,
+ [1][1][0][0][RTW89_ETSI][7] = 46,
+ [1][1][0][0][RTW89_MKK][7] = 64,
+ [1][1][0][0][RTW89_IC][7] = 34,
+ [1][1][0][0][RTW89_KCC][7] = 58,
+ [1][1][0][0][RTW89_ACMA][7] = 46,
+ [1][1][0][0][RTW89_CHILE][7] = 50,
+ [1][1][0][0][RTW89_UKRAINE][7] = 46,
+ [1][1][0][0][RTW89_MEXICO][7] = 34,
+ [1][1][0][0][RTW89_CN][7] = 46,
+ [1][1][0][0][RTW89_QATAR][7] = 46,
+ [1][1][0][0][RTW89_UK][7] = 46,
+ [1][1][0][0][RTW89_FCC][8] = 34,
+ [1][1][0][0][RTW89_ETSI][8] = 46,
+ [1][1][0][0][RTW89_MKK][8] = 64,
+ [1][1][0][0][RTW89_IC][8] = 34,
+ [1][1][0][0][RTW89_KCC][8] = 58,
+ [1][1][0][0][RTW89_ACMA][8] = 46,
+ [1][1][0][0][RTW89_CHILE][8] = 50,
+ [1][1][0][0][RTW89_UKRAINE][8] = 46,
+ [1][1][0][0][RTW89_MEXICO][8] = 34,
+ [1][1][0][0][RTW89_CN][8] = 46,
+ [1][1][0][0][RTW89_QATAR][8] = 46,
+ [1][1][0][0][RTW89_UK][8] = 46,
+ [1][1][0][0][RTW89_FCC][9] = 30,
+ [1][1][0][0][RTW89_ETSI][9] = 46,
+ [1][1][0][0][RTW89_MKK][9] = 64,
+ [1][1][0][0][RTW89_IC][9] = 30,
+ [1][1][0][0][RTW89_KCC][9] = 58,
+ [1][1][0][0][RTW89_ACMA][9] = 46,
+ [1][1][0][0][RTW89_CHILE][9] = 30,
+ [1][1][0][0][RTW89_UKRAINE][9] = 46,
+ [1][1][0][0][RTW89_MEXICO][9] = 30,
+ [1][1][0][0][RTW89_CN][9] = 46,
+ [1][1][0][0][RTW89_QATAR][9] = 46,
+ [1][1][0][0][RTW89_UK][9] = 46,
+ [1][1][0][0][RTW89_FCC][10] = 30,
+ [1][1][0][0][RTW89_ETSI][10] = 46,
+ [1][1][0][0][RTW89_MKK][10] = 64,
+ [1][1][0][0][RTW89_IC][10] = 30,
+ [1][1][0][0][RTW89_KCC][10] = 58,
+ [1][1][0][0][RTW89_ACMA][10] = 46,
+ [1][1][0][0][RTW89_CHILE][10] = 30,
+ [1][1][0][0][RTW89_UKRAINE][10] = 46,
+ [1][1][0][0][RTW89_MEXICO][10] = 30,
+ [1][1][0][0][RTW89_CN][10] = 46,
+ [1][1][0][0][RTW89_QATAR][10] = 46,
+ [1][1][0][0][RTW89_UK][10] = 46,
+ [1][1][0][0][RTW89_FCC][11] = 127,
+ [1][1][0][0][RTW89_ETSI][11] = 127,
+ [1][1][0][0][RTW89_MKK][11] = 127,
+ [1][1][0][0][RTW89_IC][11] = 127,
+ [1][1][0][0][RTW89_KCC][11] = 127,
+ [1][1][0][0][RTW89_ACMA][11] = 127,
+ [1][1][0][0][RTW89_CHILE][11] = 127,
+ [1][1][0][0][RTW89_UKRAINE][11] = 127,
+ [1][1][0][0][RTW89_MEXICO][11] = 127,
+ [1][1][0][0][RTW89_CN][11] = 127,
+ [1][1][0][0][RTW89_QATAR][11] = 127,
+ [1][1][0][0][RTW89_UK][11] = 127,
+ [1][1][0][0][RTW89_FCC][12] = 127,
+ [1][1][0][0][RTW89_ETSI][12] = 127,
+ [1][1][0][0][RTW89_MKK][12] = 127,
+ [1][1][0][0][RTW89_IC][12] = 127,
+ [1][1][0][0][RTW89_KCC][12] = 127,
+ [1][1][0][0][RTW89_ACMA][12] = 127,
+ [1][1][0][0][RTW89_CHILE][12] = 127,
+ [1][1][0][0][RTW89_UKRAINE][12] = 127,
+ [1][1][0][0][RTW89_MEXICO][12] = 127,
+ [1][1][0][0][RTW89_CN][12] = 127,
+ [1][1][0][0][RTW89_QATAR][12] = 127,
+ [1][1][0][0][RTW89_UK][12] = 127,
+ [1][1][0][0][RTW89_FCC][13] = 127,
+ [1][1][0][0][RTW89_ETSI][13] = 127,
+ [1][1][0][0][RTW89_MKK][13] = 127,
+ [1][1][0][0][RTW89_IC][13] = 127,
+ [1][1][0][0][RTW89_KCC][13] = 127,
+ [1][1][0][0][RTW89_ACMA][13] = 127,
+ [1][1][0][0][RTW89_CHILE][13] = 127,
+ [1][1][0][0][RTW89_UKRAINE][13] = 127,
+ [1][1][0][0][RTW89_MEXICO][13] = 127,
+ [1][1][0][0][RTW89_CN][13] = 127,
+ [1][1][0][0][RTW89_QATAR][13] = 127,
+ [1][1][0][0][RTW89_UK][13] = 127,
+ [0][0][1][0][RTW89_FCC][0] = 76,
+ [0][0][1][0][RTW89_ETSI][0] = 58,
+ [0][0][1][0][RTW89_MKK][0] = 74,
+ [0][0][1][0][RTW89_IC][0] = 76,
+ [0][0][1][0][RTW89_KCC][0] = 76,
+ [0][0][1][0][RTW89_ACMA][0] = 58,
+ [0][0][1][0][RTW89_CHILE][0] = 66,
+ [0][0][1][0][RTW89_UKRAINE][0] = 58,
+ [0][0][1][0][RTW89_MEXICO][0] = 76,
+ [0][0][1][0][RTW89_CN][0] = 58,
+ [0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 58,
+ [0][0][1][0][RTW89_FCC][1] = 76,
+ [0][0][1][0][RTW89_ETSI][1] = 58,
+ [0][0][1][0][RTW89_MKK][1] = 76,
+ [0][0][1][0][RTW89_IC][1] = 76,
+ [0][0][1][0][RTW89_KCC][1] = 76,
+ [0][0][1][0][RTW89_ACMA][1] = 58,
+ [0][0][1][0][RTW89_CHILE][1] = 66,
+ [0][0][1][0][RTW89_UKRAINE][1] = 58,
+ [0][0][1][0][RTW89_MEXICO][1] = 76,
+ [0][0][1][0][RTW89_CN][1] = 58,
+ [0][0][1][0][RTW89_QATAR][1] = 58,
+ [0][0][1][0][RTW89_UK][1] = 58,
+ [0][0][1][0][RTW89_FCC][2] = 78,
+ [0][0][1][0][RTW89_ETSI][2] = 58,
+ [0][0][1][0][RTW89_MKK][2] = 76,
+ [0][0][1][0][RTW89_IC][2] = 78,
+ [0][0][1][0][RTW89_KCC][2] = 76,
+ [0][0][1][0][RTW89_ACMA][2] = 58,
+ [0][0][1][0][RTW89_CHILE][2] = 66,
+ [0][0][1][0][RTW89_UKRAINE][2] = 58,
+ [0][0][1][0][RTW89_MEXICO][2] = 78,
+ [0][0][1][0][RTW89_CN][2] = 58,
+ [0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 58,
+ [0][0][1][0][RTW89_FCC][3] = 78,
+ [0][0][1][0][RTW89_ETSI][3] = 58,
+ [0][0][1][0][RTW89_MKK][3] = 76,
+ [0][0][1][0][RTW89_IC][3] = 78,
+ [0][0][1][0][RTW89_KCC][3] = 76,
+ [0][0][1][0][RTW89_ACMA][3] = 58,
+ [0][0][1][0][RTW89_CHILE][3] = 66,
+ [0][0][1][0][RTW89_UKRAINE][3] = 58,
+ [0][0][1][0][RTW89_MEXICO][3] = 78,
+ [0][0][1][0][RTW89_CN][3] = 58,
+ [0][0][1][0][RTW89_QATAR][3] = 58,
+ [0][0][1][0][RTW89_UK][3] = 58,
+ [0][0][1][0][RTW89_FCC][4] = 78,
+ [0][0][1][0][RTW89_ETSI][4] = 58,
+ [0][0][1][0][RTW89_MKK][4] = 76,
+ [0][0][1][0][RTW89_IC][4] = 78,
+ [0][0][1][0][RTW89_KCC][4] = 76,
+ [0][0][1][0][RTW89_ACMA][4] = 58,
+ [0][0][1][0][RTW89_CHILE][4] = 66,
+ [0][0][1][0][RTW89_UKRAINE][4] = 58,
+ [0][0][1][0][RTW89_MEXICO][4] = 78,
+ [0][0][1][0][RTW89_CN][4] = 58,
+ [0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 58,
+ [0][0][1][0][RTW89_FCC][5] = 78,
+ [0][0][1][0][RTW89_ETSI][5] = 58,
+ [0][0][1][0][RTW89_MKK][5] = 76,
+ [0][0][1][0][RTW89_IC][5] = 78,
+ [0][0][1][0][RTW89_KCC][5] = 76,
+ [0][0][1][0][RTW89_ACMA][5] = 58,
+ [0][0][1][0][RTW89_CHILE][5] = 66,
+ [0][0][1][0][RTW89_UKRAINE][5] = 58,
+ [0][0][1][0][RTW89_MEXICO][5] = 78,
+ [0][0][1][0][RTW89_CN][5] = 58,
+ [0][0][1][0][RTW89_QATAR][5] = 58,
+ [0][0][1][0][RTW89_UK][5] = 58,
+ [0][0][1][0][RTW89_FCC][6] = 78,
+ [0][0][1][0][RTW89_ETSI][6] = 58,
+ [0][0][1][0][RTW89_MKK][6] = 76,
+ [0][0][1][0][RTW89_IC][6] = 78,
+ [0][0][1][0][RTW89_KCC][6] = 76,
+ [0][0][1][0][RTW89_ACMA][6] = 58,
+ [0][0][1][0][RTW89_CHILE][6] = 66,
+ [0][0][1][0][RTW89_UKRAINE][6] = 58,
+ [0][0][1][0][RTW89_MEXICO][6] = 78,
+ [0][0][1][0][RTW89_CN][6] = 58,
+ [0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 58,
+ [0][0][1][0][RTW89_FCC][7] = 78,
+ [0][0][1][0][RTW89_ETSI][7] = 58,
+ [0][0][1][0][RTW89_MKK][7] = 76,
+ [0][0][1][0][RTW89_IC][7] = 78,
+ [0][0][1][0][RTW89_KCC][7] = 76,
+ [0][0][1][0][RTW89_ACMA][7] = 58,
+ [0][0][1][0][RTW89_CHILE][7] = 66,
+ [0][0][1][0][RTW89_UKRAINE][7] = 58,
+ [0][0][1][0][RTW89_MEXICO][7] = 78,
+ [0][0][1][0][RTW89_CN][7] = 58,
+ [0][0][1][0][RTW89_QATAR][7] = 58,
+ [0][0][1][0][RTW89_UK][7] = 58,
+ [0][0][1][0][RTW89_FCC][8] = 78,
+ [0][0][1][0][RTW89_ETSI][8] = 58,
+ [0][0][1][0][RTW89_MKK][8] = 76,
+ [0][0][1][0][RTW89_IC][8] = 78,
+ [0][0][1][0][RTW89_KCC][8] = 76,
+ [0][0][1][0][RTW89_ACMA][8] = 58,
+ [0][0][1][0][RTW89_CHILE][8] = 66,
+ [0][0][1][0][RTW89_UKRAINE][8] = 58,
+ [0][0][1][0][RTW89_MEXICO][8] = 78,
+ [0][0][1][0][RTW89_CN][8] = 58,
+ [0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 58,
+ [0][0][1][0][RTW89_FCC][9] = 74,
+ [0][0][1][0][RTW89_ETSI][9] = 58,
+ [0][0][1][0][RTW89_MKK][9] = 76,
+ [0][0][1][0][RTW89_IC][9] = 74,
+ [0][0][1][0][RTW89_KCC][9] = 76,
+ [0][0][1][0][RTW89_ACMA][9] = 58,
+ [0][0][1][0][RTW89_CHILE][9] = 66,
+ [0][0][1][0][RTW89_UKRAINE][9] = 58,
+ [0][0][1][0][RTW89_MEXICO][9] = 74,
+ [0][0][1][0][RTW89_CN][9] = 58,
+ [0][0][1][0][RTW89_QATAR][9] = 58,
+ [0][0][1][0][RTW89_UK][9] = 58,
+ [0][0][1][0][RTW89_FCC][10] = 74,
+ [0][0][1][0][RTW89_ETSI][10] = 58,
+ [0][0][1][0][RTW89_MKK][10] = 76,
+ [0][0][1][0][RTW89_IC][10] = 74,
+ [0][0][1][0][RTW89_KCC][10] = 76,
+ [0][0][1][0][RTW89_ACMA][10] = 58,
+ [0][0][1][0][RTW89_CHILE][10] = 66,
+ [0][0][1][0][RTW89_UKRAINE][10] = 58,
+ [0][0][1][0][RTW89_MEXICO][10] = 74,
+ [0][0][1][0][RTW89_CN][10] = 58,
+ [0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 58,
+ [0][0][1][0][RTW89_FCC][11] = 54,
+ [0][0][1][0][RTW89_ETSI][11] = 58,
+ [0][0][1][0][RTW89_MKK][11] = 76,
+ [0][0][1][0][RTW89_IC][11] = 54,
+ [0][0][1][0][RTW89_KCC][11] = 76,
+ [0][0][1][0][RTW89_ACMA][11] = 58,
+ [0][0][1][0][RTW89_CHILE][11] = 54,
+ [0][0][1][0][RTW89_UKRAINE][11] = 58,
+ [0][0][1][0][RTW89_MEXICO][11] = 54,
+ [0][0][1][0][RTW89_CN][11] = 58,
+ [0][0][1][0][RTW89_QATAR][11] = 58,
+ [0][0][1][0][RTW89_UK][11] = 58,
+ [0][0][1][0][RTW89_FCC][12] = 50,
+ [0][0][1][0][RTW89_ETSI][12] = 58,
+ [0][0][1][0][RTW89_MKK][12] = 76,
+ [0][0][1][0][RTW89_IC][12] = 50,
+ [0][0][1][0][RTW89_KCC][12] = 76,
+ [0][0][1][0][RTW89_ACMA][12] = 58,
+ [0][0][1][0][RTW89_CHILE][12] = 50,
+ [0][0][1][0][RTW89_UKRAINE][12] = 58,
+ [0][0][1][0][RTW89_MEXICO][12] = 50,
+ [0][0][1][0][RTW89_CN][12] = 58,
+ [0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 58,
+ [0][0][1][0][RTW89_FCC][13] = 127,
+ [0][0][1][0][RTW89_ETSI][13] = 127,
+ [0][0][1][0][RTW89_MKK][13] = 127,
+ [0][0][1][0][RTW89_IC][13] = 127,
+ [0][0][1][0][RTW89_KCC][13] = 127,
+ [0][0][1][0][RTW89_ACMA][13] = 127,
+ [0][0][1][0][RTW89_CHILE][13] = 127,
+ [0][0][1][0][RTW89_UKRAINE][13] = 127,
+ [0][0][1][0][RTW89_MEXICO][13] = 127,
+ [0][0][1][0][RTW89_CN][13] = 127,
+ [0][0][1][0][RTW89_QATAR][13] = 127,
+ [0][0][1][0][RTW89_UK][13] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 62,
+ [0][1][1][0][RTW89_ETSI][0] = 46,
+ [0][1][1][0][RTW89_MKK][0] = 64,
+ [0][1][1][0][RTW89_IC][0] = 62,
+ [0][1][1][0][RTW89_KCC][0] = 66,
+ [0][1][1][0][RTW89_ACMA][0] = 46,
+ [0][1][1][0][RTW89_CHILE][0] = 50,
+ [0][1][1][0][RTW89_UKRAINE][0] = 46,
+ [0][1][1][0][RTW89_MEXICO][0] = 62,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 46,
+ [0][1][1][0][RTW89_FCC][1] = 62,
+ [0][1][1][0][RTW89_ETSI][1] = 46,
+ [0][1][1][0][RTW89_MKK][1] = 64,
+ [0][1][1][0][RTW89_IC][1] = 62,
+ [0][1][1][0][RTW89_KCC][1] = 66,
+ [0][1][1][0][RTW89_ACMA][1] = 46,
+ [0][1][1][0][RTW89_CHILE][1] = 50,
+ [0][1][1][0][RTW89_UKRAINE][1] = 46,
+ [0][1][1][0][RTW89_MEXICO][1] = 62,
+ [0][1][1][0][RTW89_CN][1] = 46,
+ [0][1][1][0][RTW89_QATAR][1] = 46,
+ [0][1][1][0][RTW89_UK][1] = 46,
+ [0][1][1][0][RTW89_FCC][2] = 66,
+ [0][1][1][0][RTW89_ETSI][2] = 46,
+ [0][1][1][0][RTW89_MKK][2] = 64,
+ [0][1][1][0][RTW89_IC][2] = 66,
+ [0][1][1][0][RTW89_KCC][2] = 66,
+ [0][1][1][0][RTW89_ACMA][2] = 46,
+ [0][1][1][0][RTW89_CHILE][2] = 50,
+ [0][1][1][0][RTW89_UKRAINE][2] = 46,
+ [0][1][1][0][RTW89_MEXICO][2] = 66,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 46,
+ [0][1][1][0][RTW89_FCC][3] = 70,
+ [0][1][1][0][RTW89_ETSI][3] = 46,
+ [0][1][1][0][RTW89_MKK][3] = 64,
+ [0][1][1][0][RTW89_IC][3] = 70,
+ [0][1][1][0][RTW89_KCC][3] = 66,
+ [0][1][1][0][RTW89_ACMA][3] = 46,
+ [0][1][1][0][RTW89_CHILE][3] = 50,
+ [0][1][1][0][RTW89_UKRAINE][3] = 46,
+ [0][1][1][0][RTW89_MEXICO][3] = 70,
+ [0][1][1][0][RTW89_CN][3] = 46,
+ [0][1][1][0][RTW89_QATAR][3] = 46,
+ [0][1][1][0][RTW89_UK][3] = 46,
+ [0][1][1][0][RTW89_FCC][4] = 78,
+ [0][1][1][0][RTW89_ETSI][4] = 46,
+ [0][1][1][0][RTW89_MKK][4] = 64,
+ [0][1][1][0][RTW89_IC][4] = 78,
+ [0][1][1][0][RTW89_KCC][4] = 64,
+ [0][1][1][0][RTW89_ACMA][4] = 46,
+ [0][1][1][0][RTW89_CHILE][4] = 50,
+ [0][1][1][0][RTW89_UKRAINE][4] = 46,
+ [0][1][1][0][RTW89_MEXICO][4] = 78,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 46,
+ [0][1][1][0][RTW89_FCC][5] = 78,
+ [0][1][1][0][RTW89_ETSI][5] = 46,
+ [0][1][1][0][RTW89_MKK][5] = 64,
+ [0][1][1][0][RTW89_IC][5] = 78,
+ [0][1][1][0][RTW89_KCC][5] = 64,
+ [0][1][1][0][RTW89_ACMA][5] = 46,
+ [0][1][1][0][RTW89_CHILE][5] = 50,
+ [0][1][1][0][RTW89_UKRAINE][5] = 46,
+ [0][1][1][0][RTW89_MEXICO][5] = 78,
+ [0][1][1][0][RTW89_CN][5] = 46,
+ [0][1][1][0][RTW89_QATAR][5] = 46,
+ [0][1][1][0][RTW89_UK][5] = 46,
+ [0][1][1][0][RTW89_FCC][6] = 78,
+ [0][1][1][0][RTW89_ETSI][6] = 46,
+ [0][1][1][0][RTW89_MKK][6] = 64,
+ [0][1][1][0][RTW89_IC][6] = 78,
+ [0][1][1][0][RTW89_KCC][6] = 64,
+ [0][1][1][0][RTW89_ACMA][6] = 46,
+ [0][1][1][0][RTW89_CHILE][6] = 50,
+ [0][1][1][0][RTW89_UKRAINE][6] = 46,
+ [0][1][1][0][RTW89_MEXICO][6] = 78,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 46,
+ [0][1][1][0][RTW89_FCC][7] = 70,
+ [0][1][1][0][RTW89_ETSI][7] = 46,
+ [0][1][1][0][RTW89_MKK][7] = 64,
+ [0][1][1][0][RTW89_IC][7] = 70,
+ [0][1][1][0][RTW89_KCC][7] = 64,
+ [0][1][1][0][RTW89_ACMA][7] = 46,
+ [0][1][1][0][RTW89_CHILE][7] = 50,
+ [0][1][1][0][RTW89_UKRAINE][7] = 46,
+ [0][1][1][0][RTW89_MEXICO][7] = 70,
+ [0][1][1][0][RTW89_CN][7] = 46,
+ [0][1][1][0][RTW89_QATAR][7] = 46,
+ [0][1][1][0][RTW89_UK][7] = 46,
+ [0][1][1][0][RTW89_FCC][8] = 66,
+ [0][1][1][0][RTW89_ETSI][8] = 46,
+ [0][1][1][0][RTW89_MKK][8] = 64,
+ [0][1][1][0][RTW89_IC][8] = 66,
+ [0][1][1][0][RTW89_KCC][8] = 64,
+ [0][1][1][0][RTW89_ACMA][8] = 46,
+ [0][1][1][0][RTW89_CHILE][8] = 50,
+ [0][1][1][0][RTW89_UKRAINE][8] = 46,
+ [0][1][1][0][RTW89_MEXICO][8] = 66,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 46,
+ [0][1][1][0][RTW89_FCC][9] = 62,
+ [0][1][1][0][RTW89_ETSI][9] = 46,
+ [0][1][1][0][RTW89_MKK][9] = 64,
+ [0][1][1][0][RTW89_IC][9] = 62,
+ [0][1][1][0][RTW89_KCC][9] = 64,
+ [0][1][1][0][RTW89_ACMA][9] = 46,
+ [0][1][1][0][RTW89_CHILE][9] = 50,
+ [0][1][1][0][RTW89_UKRAINE][9] = 46,
+ [0][1][1][0][RTW89_MEXICO][9] = 62,
+ [0][1][1][0][RTW89_CN][9] = 46,
+ [0][1][1][0][RTW89_QATAR][9] = 46,
+ [0][1][1][0][RTW89_UK][9] = 46,
+ [0][1][1][0][RTW89_FCC][10] = 62,
+ [0][1][1][0][RTW89_ETSI][10] = 46,
+ [0][1][1][0][RTW89_MKK][10] = 64,
+ [0][1][1][0][RTW89_IC][10] = 62,
+ [0][1][1][0][RTW89_KCC][10] = 64,
+ [0][1][1][0][RTW89_ACMA][10] = 46,
+ [0][1][1][0][RTW89_CHILE][10] = 52,
+ [0][1][1][0][RTW89_UKRAINE][10] = 46,
+ [0][1][1][0][RTW89_MEXICO][10] = 62,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 46,
+ [0][1][1][0][RTW89_FCC][11] = 46,
+ [0][1][1][0][RTW89_ETSI][11] = 46,
+ [0][1][1][0][RTW89_MKK][11] = 64,
+ [0][1][1][0][RTW89_IC][11] = 46,
+ [0][1][1][0][RTW89_KCC][11] = 64,
+ [0][1][1][0][RTW89_ACMA][11] = 46,
+ [0][1][1][0][RTW89_CHILE][11] = 46,
+ [0][1][1][0][RTW89_UKRAINE][11] = 46,
+ [0][1][1][0][RTW89_MEXICO][11] = 46,
+ [0][1][1][0][RTW89_CN][11] = 46,
+ [0][1][1][0][RTW89_QATAR][11] = 46,
+ [0][1][1][0][RTW89_UK][11] = 46,
+ [0][1][1][0][RTW89_FCC][12] = 42,
+ [0][1][1][0][RTW89_ETSI][12] = 46,
+ [0][1][1][0][RTW89_MKK][12] = 64,
+ [0][1][1][0][RTW89_IC][12] = 42,
+ [0][1][1][0][RTW89_KCC][12] = 64,
+ [0][1][1][0][RTW89_ACMA][12] = 46,
+ [0][1][1][0][RTW89_CHILE][12] = 42,
+ [0][1][1][0][RTW89_UKRAINE][12] = 46,
+ [0][1][1][0][RTW89_MEXICO][12] = 42,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 46,
+ [0][1][1][0][RTW89_FCC][13] = 127,
+ [0][1][1][0][RTW89_ETSI][13] = 127,
+ [0][1][1][0][RTW89_MKK][13] = 127,
+ [0][1][1][0][RTW89_IC][13] = 127,
+ [0][1][1][0][RTW89_KCC][13] = 127,
+ [0][1][1][0][RTW89_ACMA][13] = 127,
+ [0][1][1][0][RTW89_CHILE][13] = 127,
+ [0][1][1][0][RTW89_UKRAINE][13] = 127,
+ [0][1][1][0][RTW89_MEXICO][13] = 127,
+ [0][1][1][0][RTW89_CN][13] = 127,
+ [0][1][1][0][RTW89_QATAR][13] = 127,
+ [0][1][1][0][RTW89_UK][13] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 76,
+ [0][0][2][0][RTW89_ETSI][0] = 58,
+ [0][0][2][0][RTW89_MKK][0] = 76,
+ [0][0][2][0][RTW89_IC][0] = 76,
+ [0][0][2][0][RTW89_KCC][0] = 76,
+ [0][0][2][0][RTW89_ACMA][0] = 58,
+ [0][0][2][0][RTW89_CHILE][0] = 66,
+ [0][0][2][0][RTW89_UKRAINE][0] = 58,
+ [0][0][2][0][RTW89_MEXICO][0] = 76,
+ [0][0][2][0][RTW89_CN][0] = 58,
+ [0][0][2][0][RTW89_QATAR][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 58,
+ [0][0][2][0][RTW89_FCC][1] = 76,
+ [0][0][2][0][RTW89_ETSI][1] = 58,
+ [0][0][2][0][RTW89_MKK][1] = 76,
+ [0][0][2][0][RTW89_IC][1] = 76,
+ [0][0][2][0][RTW89_KCC][1] = 76,
+ [0][0][2][0][RTW89_ACMA][1] = 58,
+ [0][0][2][0][RTW89_CHILE][1] = 66,
+ [0][0][2][0][RTW89_UKRAINE][1] = 58,
+ [0][0][2][0][RTW89_MEXICO][1] = 76,
+ [0][0][2][0][RTW89_CN][1] = 58,
+ [0][0][2][0][RTW89_QATAR][1] = 58,
+ [0][0][2][0][RTW89_UK][1] = 58,
+ [0][0][2][0][RTW89_FCC][2] = 78,
+ [0][0][2][0][RTW89_ETSI][2] = 58,
+ [0][0][2][0][RTW89_MKK][2] = 76,
+ [0][0][2][0][RTW89_IC][2] = 78,
+ [0][0][2][0][RTW89_KCC][2] = 76,
+ [0][0][2][0][RTW89_ACMA][2] = 58,
+ [0][0][2][0][RTW89_CHILE][2] = 66,
+ [0][0][2][0][RTW89_UKRAINE][2] = 58,
+ [0][0][2][0][RTW89_MEXICO][2] = 78,
+ [0][0][2][0][RTW89_CN][2] = 58,
+ [0][0][2][0][RTW89_QATAR][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 58,
+ [0][0][2][0][RTW89_FCC][3] = 78,
+ [0][0][2][0][RTW89_ETSI][3] = 58,
+ [0][0][2][0][RTW89_MKK][3] = 76,
+ [0][0][2][0][RTW89_IC][3] = 78,
+ [0][0][2][0][RTW89_KCC][3] = 76,
+ [0][0][2][0][RTW89_ACMA][3] = 58,
+ [0][0][2][0][RTW89_CHILE][3] = 66,
+ [0][0][2][0][RTW89_UKRAINE][3] = 58,
+ [0][0][2][0][RTW89_MEXICO][3] = 78,
+ [0][0][2][0][RTW89_CN][3] = 58,
+ [0][0][2][0][RTW89_QATAR][3] = 58,
+ [0][0][2][0][RTW89_UK][3] = 58,
+ [0][0][2][0][RTW89_FCC][4] = 78,
+ [0][0][2][0][RTW89_ETSI][4] = 58,
+ [0][0][2][0][RTW89_MKK][4] = 76,
+ [0][0][2][0][RTW89_IC][4] = 78,
+ [0][0][2][0][RTW89_KCC][4] = 76,
+ [0][0][2][0][RTW89_ACMA][4] = 58,
+ [0][0][2][0][RTW89_CHILE][4] = 66,
+ [0][0][2][0][RTW89_UKRAINE][4] = 58,
+ [0][0][2][0][RTW89_MEXICO][4] = 78,
+ [0][0][2][0][RTW89_CN][4] = 58,
+ [0][0][2][0][RTW89_QATAR][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 58,
+ [0][0][2][0][RTW89_FCC][5] = 78,
+ [0][0][2][0][RTW89_ETSI][5] = 58,
+ [0][0][2][0][RTW89_MKK][5] = 76,
+ [0][0][2][0][RTW89_IC][5] = 78,
+ [0][0][2][0][RTW89_KCC][5] = 76,
+ [0][0][2][0][RTW89_ACMA][5] = 58,
+ [0][0][2][0][RTW89_CHILE][5] = 66,
+ [0][0][2][0][RTW89_UKRAINE][5] = 58,
+ [0][0][2][0][RTW89_MEXICO][5] = 78,
+ [0][0][2][0][RTW89_CN][5] = 58,
+ [0][0][2][0][RTW89_QATAR][5] = 58,
+ [0][0][2][0][RTW89_UK][5] = 58,
+ [0][0][2][0][RTW89_FCC][6] = 78,
+ [0][0][2][0][RTW89_ETSI][6] = 58,
+ [0][0][2][0][RTW89_MKK][6] = 76,
+ [0][0][2][0][RTW89_IC][6] = 78,
+ [0][0][2][0][RTW89_KCC][6] = 76,
+ [0][0][2][0][RTW89_ACMA][6] = 58,
+ [0][0][2][0][RTW89_CHILE][6] = 66,
+ [0][0][2][0][RTW89_UKRAINE][6] = 58,
+ [0][0][2][0][RTW89_MEXICO][6] = 78,
+ [0][0][2][0][RTW89_CN][6] = 58,
+ [0][0][2][0][RTW89_QATAR][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 58,
+ [0][0][2][0][RTW89_FCC][7] = 78,
+ [0][0][2][0][RTW89_ETSI][7] = 58,
+ [0][0][2][0][RTW89_MKK][7] = 76,
+ [0][0][2][0][RTW89_IC][7] = 78,
+ [0][0][2][0][RTW89_KCC][7] = 76,
+ [0][0][2][0][RTW89_ACMA][7] = 58,
+ [0][0][2][0][RTW89_CHILE][7] = 66,
+ [0][0][2][0][RTW89_UKRAINE][7] = 58,
+ [0][0][2][0][RTW89_MEXICO][7] = 78,
+ [0][0][2][0][RTW89_CN][7] = 58,
+ [0][0][2][0][RTW89_QATAR][7] = 58,
+ [0][0][2][0][RTW89_UK][7] = 58,
+ [0][0][2][0][RTW89_FCC][8] = 76,
+ [0][0][2][0][RTW89_ETSI][8] = 58,
+ [0][0][2][0][RTW89_MKK][8] = 76,
+ [0][0][2][0][RTW89_IC][8] = 76,
+ [0][0][2][0][RTW89_KCC][8] = 76,
+ [0][0][2][0][RTW89_ACMA][8] = 58,
+ [0][0][2][0][RTW89_CHILE][8] = 66,
+ [0][0][2][0][RTW89_UKRAINE][8] = 58,
+ [0][0][2][0][RTW89_MEXICO][8] = 76,
+ [0][0][2][0][RTW89_CN][8] = 58,
+ [0][0][2][0][RTW89_QATAR][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 58,
+ [0][0][2][0][RTW89_FCC][9] = 72,
+ [0][0][2][0][RTW89_ETSI][9] = 58,
+ [0][0][2][0][RTW89_MKK][9] = 76,
+ [0][0][2][0][RTW89_IC][9] = 72,
+ [0][0][2][0][RTW89_KCC][9] = 76,
+ [0][0][2][0][RTW89_ACMA][9] = 58,
+ [0][0][2][0][RTW89_CHILE][9] = 66,
+ [0][0][2][0][RTW89_UKRAINE][9] = 58,
+ [0][0][2][0][RTW89_MEXICO][9] = 72,
+ [0][0][2][0][RTW89_CN][9] = 58,
+ [0][0][2][0][RTW89_QATAR][9] = 58,
+ [0][0][2][0][RTW89_UK][9] = 58,
+ [0][0][2][0][RTW89_FCC][10] = 72,
+ [0][0][2][0][RTW89_ETSI][10] = 58,
+ [0][0][2][0][RTW89_MKK][10] = 76,
+ [0][0][2][0][RTW89_IC][10] = 72,
+ [0][0][2][0][RTW89_KCC][10] = 76,
+ [0][0][2][0][RTW89_ACMA][10] = 58,
+ [0][0][2][0][RTW89_CHILE][10] = 66,
+ [0][0][2][0][RTW89_UKRAINE][10] = 58,
+ [0][0][2][0][RTW89_MEXICO][10] = 72,
+ [0][0][2][0][RTW89_CN][10] = 58,
+ [0][0][2][0][RTW89_QATAR][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 58,
+ [0][0][2][0][RTW89_FCC][11] = 54,
+ [0][0][2][0][RTW89_ETSI][11] = 58,
+ [0][0][2][0][RTW89_MKK][11] = 76,
+ [0][0][2][0][RTW89_IC][11] = 54,
+ [0][0][2][0][RTW89_KCC][11] = 76,
+ [0][0][2][0][RTW89_ACMA][11] = 58,
+ [0][0][2][0][RTW89_CHILE][11] = 54,
+ [0][0][2][0][RTW89_UKRAINE][11] = 58,
+ [0][0][2][0][RTW89_MEXICO][11] = 54,
+ [0][0][2][0][RTW89_CN][11] = 58,
+ [0][0][2][0][RTW89_QATAR][11] = 58,
+ [0][0][2][0][RTW89_UK][11] = 58,
+ [0][0][2][0][RTW89_FCC][12] = 50,
+ [0][0][2][0][RTW89_ETSI][12] = 58,
+ [0][0][2][0][RTW89_MKK][12] = 76,
+ [0][0][2][0][RTW89_IC][12] = 50,
+ [0][0][2][0][RTW89_KCC][12] = 76,
+ [0][0][2][0][RTW89_ACMA][12] = 58,
+ [0][0][2][0][RTW89_CHILE][12] = 50,
+ [0][0][2][0][RTW89_UKRAINE][12] = 58,
+ [0][0][2][0][RTW89_MEXICO][12] = 50,
+ [0][0][2][0][RTW89_CN][12] = 58,
+ [0][0][2][0][RTW89_QATAR][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 58,
+ [0][0][2][0][RTW89_FCC][13] = 127,
+ [0][0][2][0][RTW89_ETSI][13] = 127,
+ [0][0][2][0][RTW89_MKK][13] = 127,
+ [0][0][2][0][RTW89_IC][13] = 127,
+ [0][0][2][0][RTW89_KCC][13] = 127,
+ [0][0][2][0][RTW89_ACMA][13] = 127,
+ [0][0][2][0][RTW89_CHILE][13] = 127,
+ [0][0][2][0][RTW89_UKRAINE][13] = 127,
+ [0][0][2][0][RTW89_MEXICO][13] = 127,
+ [0][0][2][0][RTW89_CN][13] = 127,
+ [0][0][2][0][RTW89_QATAR][13] = 127,
+ [0][0][2][0][RTW89_UK][13] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 58,
+ [0][1][2][0][RTW89_ETSI][0] = 46,
+ [0][1][2][0][RTW89_MKK][0] = 66,
+ [0][1][2][0][RTW89_IC][0] = 58,
+ [0][1][2][0][RTW89_KCC][0] = 62,
+ [0][1][2][0][RTW89_ACMA][0] = 46,
+ [0][1][2][0][RTW89_CHILE][0] = 50,
+ [0][1][2][0][RTW89_UKRAINE][0] = 46,
+ [0][1][2][0][RTW89_MEXICO][0] = 58,
+ [0][1][2][0][RTW89_CN][0] = 46,
+ [0][1][2][0][RTW89_QATAR][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 46,
+ [0][1][2][0][RTW89_FCC][1] = 58,
+ [0][1][2][0][RTW89_ETSI][1] = 46,
+ [0][1][2][0][RTW89_MKK][1] = 66,
+ [0][1][2][0][RTW89_IC][1] = 58,
+ [0][1][2][0][RTW89_KCC][1] = 62,
+ [0][1][2][0][RTW89_ACMA][1] = 46,
+ [0][1][2][0][RTW89_CHILE][1] = 50,
+ [0][1][2][0][RTW89_UKRAINE][1] = 46,
+ [0][1][2][0][RTW89_MEXICO][1] = 58,
+ [0][1][2][0][RTW89_CN][1] = 46,
+ [0][1][2][0][RTW89_QATAR][1] = 46,
+ [0][1][2][0][RTW89_UK][1] = 46,
+ [0][1][2][0][RTW89_FCC][2] = 62,
+ [0][1][2][0][RTW89_ETSI][2] = 46,
+ [0][1][2][0][RTW89_MKK][2] = 66,
+ [0][1][2][0][RTW89_IC][2] = 62,
+ [0][1][2][0][RTW89_KCC][2] = 62,
+ [0][1][2][0][RTW89_ACMA][2] = 46,
+ [0][1][2][0][RTW89_CHILE][2] = 50,
+ [0][1][2][0][RTW89_UKRAINE][2] = 46,
+ [0][1][2][0][RTW89_MEXICO][2] = 62,
+ [0][1][2][0][RTW89_CN][2] = 46,
+ [0][1][2][0][RTW89_QATAR][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 46,
+ [0][1][2][0][RTW89_FCC][3] = 66,
+ [0][1][2][0][RTW89_ETSI][3] = 46,
+ [0][1][2][0][RTW89_MKK][3] = 66,
+ [0][1][2][0][RTW89_IC][3] = 66,
+ [0][1][2][0][RTW89_KCC][3] = 62,
+ [0][1][2][0][RTW89_ACMA][3] = 46,
+ [0][1][2][0][RTW89_CHILE][3] = 50,
+ [0][1][2][0][RTW89_UKRAINE][3] = 46,
+ [0][1][2][0][RTW89_MEXICO][3] = 66,
+ [0][1][2][0][RTW89_CN][3] = 46,
+ [0][1][2][0][RTW89_QATAR][3] = 46,
+ [0][1][2][0][RTW89_UK][3] = 46,
+ [0][1][2][0][RTW89_FCC][4] = 72,
+ [0][1][2][0][RTW89_ETSI][4] = 46,
+ [0][1][2][0][RTW89_MKK][4] = 66,
+ [0][1][2][0][RTW89_IC][4] = 72,
+ [0][1][2][0][RTW89_KCC][4] = 62,
+ [0][1][2][0][RTW89_ACMA][4] = 46,
+ [0][1][2][0][RTW89_CHILE][4] = 50,
+ [0][1][2][0][RTW89_UKRAINE][4] = 46,
+ [0][1][2][0][RTW89_MEXICO][4] = 72,
+ [0][1][2][0][RTW89_CN][4] = 46,
+ [0][1][2][0][RTW89_QATAR][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 46,
+ [0][1][2][0][RTW89_FCC][5] = 78,
+ [0][1][2][0][RTW89_ETSI][5] = 46,
+ [0][1][2][0][RTW89_MKK][5] = 66,
+ [0][1][2][0][RTW89_IC][5] = 78,
+ [0][1][2][0][RTW89_KCC][5] = 62,
+ [0][1][2][0][RTW89_ACMA][5] = 46,
+ [0][1][2][0][RTW89_CHILE][5] = 50,
+ [0][1][2][0][RTW89_UKRAINE][5] = 46,
+ [0][1][2][0][RTW89_MEXICO][5] = 78,
+ [0][1][2][0][RTW89_CN][5] = 46,
+ [0][1][2][0][RTW89_QATAR][5] = 46,
+ [0][1][2][0][RTW89_UK][5] = 46,
+ [0][1][2][0][RTW89_FCC][6] = 74,
+ [0][1][2][0][RTW89_ETSI][6] = 46,
+ [0][1][2][0][RTW89_MKK][6] = 66,
+ [0][1][2][0][RTW89_IC][6] = 74,
+ [0][1][2][0][RTW89_KCC][6] = 62,
+ [0][1][2][0][RTW89_ACMA][6] = 46,
+ [0][1][2][0][RTW89_CHILE][6] = 50,
+ [0][1][2][0][RTW89_UKRAINE][6] = 46,
+ [0][1][2][0][RTW89_MEXICO][6] = 74,
+ [0][1][2][0][RTW89_CN][6] = 46,
+ [0][1][2][0][RTW89_QATAR][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 46,
+ [0][1][2][0][RTW89_FCC][7] = 66,
+ [0][1][2][0][RTW89_ETSI][7] = 46,
+ [0][1][2][0][RTW89_MKK][7] = 66,
+ [0][1][2][0][RTW89_IC][7] = 66,
+ [0][1][2][0][RTW89_KCC][7] = 62,
+ [0][1][2][0][RTW89_ACMA][7] = 46,
+ [0][1][2][0][RTW89_CHILE][7] = 50,
+ [0][1][2][0][RTW89_UKRAINE][7] = 46,
+ [0][1][2][0][RTW89_MEXICO][7] = 66,
+ [0][1][2][0][RTW89_CN][7] = 46,
+ [0][1][2][0][RTW89_QATAR][7] = 46,
+ [0][1][2][0][RTW89_UK][7] = 46,
+ [0][1][2][0][RTW89_FCC][8] = 62,
+ [0][1][2][0][RTW89_ETSI][8] = 46,
+ [0][1][2][0][RTW89_MKK][8] = 66,
+ [0][1][2][0][RTW89_IC][8] = 62,
+ [0][1][2][0][RTW89_KCC][8] = 62,
+ [0][1][2][0][RTW89_ACMA][8] = 46,
+ [0][1][2][0][RTW89_CHILE][8] = 50,
+ [0][1][2][0][RTW89_UKRAINE][8] = 46,
+ [0][1][2][0][RTW89_MEXICO][8] = 62,
+ [0][1][2][0][RTW89_CN][8] = 46,
+ [0][1][2][0][RTW89_QATAR][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 46,
+ [0][1][2][0][RTW89_FCC][9] = 58,
+ [0][1][2][0][RTW89_ETSI][9] = 46,
+ [0][1][2][0][RTW89_MKK][9] = 66,
+ [0][1][2][0][RTW89_IC][9] = 58,
+ [0][1][2][0][RTW89_KCC][9] = 60,
+ [0][1][2][0][RTW89_ACMA][9] = 46,
+ [0][1][2][0][RTW89_CHILE][9] = 50,
+ [0][1][2][0][RTW89_UKRAINE][9] = 46,
+ [0][1][2][0][RTW89_MEXICO][9] = 58,
+ [0][1][2][0][RTW89_CN][9] = 46,
+ [0][1][2][0][RTW89_QATAR][9] = 46,
+ [0][1][2][0][RTW89_UK][9] = 46,
+ [0][1][2][0][RTW89_FCC][10] = 58,
+ [0][1][2][0][RTW89_ETSI][10] = 46,
+ [0][1][2][0][RTW89_MKK][10] = 66,
+ [0][1][2][0][RTW89_IC][10] = 58,
+ [0][1][2][0][RTW89_KCC][10] = 60,
+ [0][1][2][0][RTW89_ACMA][10] = 46,
+ [0][1][2][0][RTW89_CHILE][10] = 50,
+ [0][1][2][0][RTW89_UKRAINE][10] = 46,
+ [0][1][2][0][RTW89_MEXICO][10] = 58,
+ [0][1][2][0][RTW89_CN][10] = 46,
+ [0][1][2][0][RTW89_QATAR][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 46,
+ [0][1][2][0][RTW89_FCC][11] = 46,
+ [0][1][2][0][RTW89_ETSI][11] = 46,
+ [0][1][2][0][RTW89_MKK][11] = 66,
+ [0][1][2][0][RTW89_IC][11] = 46,
+ [0][1][2][0][RTW89_KCC][11] = 60,
+ [0][1][2][0][RTW89_ACMA][11] = 46,
+ [0][1][2][0][RTW89_CHILE][11] = 46,
+ [0][1][2][0][RTW89_UKRAINE][11] = 46,
+ [0][1][2][0][RTW89_MEXICO][11] = 46,
+ [0][1][2][0][RTW89_CN][11] = 46,
+ [0][1][2][0][RTW89_QATAR][11] = 46,
+ [0][1][2][0][RTW89_UK][11] = 46,
+ [0][1][2][0][RTW89_FCC][12] = 42,
+ [0][1][2][0][RTW89_ETSI][12] = 46,
+ [0][1][2][0][RTW89_MKK][12] = 66,
+ [0][1][2][0][RTW89_IC][12] = 42,
+ [0][1][2][0][RTW89_KCC][12] = 60,
+ [0][1][2][0][RTW89_ACMA][12] = 46,
+ [0][1][2][0][RTW89_CHILE][12] = 42,
+ [0][1][2][0][RTW89_UKRAINE][12] = 46,
+ [0][1][2][0][RTW89_MEXICO][12] = 42,
+ [0][1][2][0][RTW89_CN][12] = 46,
+ [0][1][2][0][RTW89_QATAR][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 46,
+ [0][1][2][0][RTW89_FCC][13] = 127,
+ [0][1][2][0][RTW89_ETSI][13] = 127,
+ [0][1][2][0][RTW89_MKK][13] = 127,
+ [0][1][2][0][RTW89_IC][13] = 127,
+ [0][1][2][0][RTW89_KCC][13] = 127,
+ [0][1][2][0][RTW89_ACMA][13] = 127,
+ [0][1][2][0][RTW89_CHILE][13] = 127,
+ [0][1][2][0][RTW89_UKRAINE][13] = 127,
+ [0][1][2][0][RTW89_MEXICO][13] = 127,
+ [0][1][2][0][RTW89_CN][13] = 127,
+ [0][1][2][0][RTW89_QATAR][13] = 127,
+ [0][1][2][0][RTW89_UK][13] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 58,
+ [0][1][2][1][RTW89_ETSI][0] = 34,
+ [0][1][2][1][RTW89_MKK][0] = 66,
+ [0][1][2][1][RTW89_IC][0] = 58,
+ [0][1][2][1][RTW89_KCC][0] = 62,
+ [0][1][2][1][RTW89_ACMA][0] = 34,
+ [0][1][2][1][RTW89_CHILE][0] = 42,
+ [0][1][2][1][RTW89_UKRAINE][0] = 34,
+ [0][1][2][1][RTW89_MEXICO][0] = 58,
+ [0][1][2][1][RTW89_CN][0] = 34,
+ [0][1][2][1][RTW89_QATAR][0] = 34,
+ [0][1][2][1][RTW89_UK][0] = 34,
+ [0][1][2][1][RTW89_FCC][1] = 58,
+ [0][1][2][1][RTW89_ETSI][1] = 34,
+ [0][1][2][1][RTW89_MKK][1] = 66,
+ [0][1][2][1][RTW89_IC][1] = 58,
+ [0][1][2][1][RTW89_KCC][1] = 62,
+ [0][1][2][1][RTW89_ACMA][1] = 34,
+ [0][1][2][1][RTW89_CHILE][1] = 40,
+ [0][1][2][1][RTW89_UKRAINE][1] = 34,
+ [0][1][2][1][RTW89_MEXICO][1] = 58,
+ [0][1][2][1][RTW89_CN][1] = 34,
+ [0][1][2][1][RTW89_QATAR][1] = 34,
+ [0][1][2][1][RTW89_UK][1] = 34,
+ [0][1][2][1][RTW89_FCC][2] = 62,
+ [0][1][2][1][RTW89_ETSI][2] = 34,
+ [0][1][2][1][RTW89_MKK][2] = 66,
+ [0][1][2][1][RTW89_IC][2] = 62,
+ [0][1][2][1][RTW89_KCC][2] = 62,
+ [0][1][2][1][RTW89_ACMA][2] = 34,
+ [0][1][2][1][RTW89_CHILE][2] = 40,
+ [0][1][2][1][RTW89_UKRAINE][2] = 34,
+ [0][1][2][1][RTW89_MEXICO][2] = 62,
+ [0][1][2][1][RTW89_CN][2] = 34,
+ [0][1][2][1][RTW89_QATAR][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 34,
+ [0][1][2][1][RTW89_FCC][3] = 66,
+ [0][1][2][1][RTW89_ETSI][3] = 34,
+ [0][1][2][1][RTW89_MKK][3] = 66,
+ [0][1][2][1][RTW89_IC][3] = 66,
+ [0][1][2][1][RTW89_KCC][3] = 62,
+ [0][1][2][1][RTW89_ACMA][3] = 34,
+ [0][1][2][1][RTW89_CHILE][3] = 40,
+ [0][1][2][1][RTW89_UKRAINE][3] = 34,
+ [0][1][2][1][RTW89_MEXICO][3] = 66,
+ [0][1][2][1][RTW89_CN][3] = 34,
+ [0][1][2][1][RTW89_QATAR][3] = 34,
+ [0][1][2][1][RTW89_UK][3] = 34,
+ [0][1][2][1][RTW89_FCC][4] = 72,
+ [0][1][2][1][RTW89_ETSI][4] = 34,
+ [0][1][2][1][RTW89_MKK][4] = 66,
+ [0][1][2][1][RTW89_IC][4] = 72,
+ [0][1][2][1][RTW89_KCC][4] = 62,
+ [0][1][2][1][RTW89_ACMA][4] = 34,
+ [0][1][2][1][RTW89_CHILE][4] = 40,
+ [0][1][2][1][RTW89_UKRAINE][4] = 34,
+ [0][1][2][1][RTW89_MEXICO][4] = 72,
+ [0][1][2][1][RTW89_CN][4] = 34,
+ [0][1][2][1][RTW89_QATAR][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 34,
+ [0][1][2][1][RTW89_FCC][5] = 78,
+ [0][1][2][1][RTW89_ETSI][5] = 34,
+ [0][1][2][1][RTW89_MKK][5] = 66,
+ [0][1][2][1][RTW89_IC][5] = 78,
+ [0][1][2][1][RTW89_KCC][5] = 62,
+ [0][1][2][1][RTW89_ACMA][5] = 34,
+ [0][1][2][1][RTW89_CHILE][5] = 42,
+ [0][1][2][1][RTW89_UKRAINE][5] = 34,
+ [0][1][2][1][RTW89_MEXICO][5] = 78,
+ [0][1][2][1][RTW89_CN][5] = 34,
+ [0][1][2][1][RTW89_QATAR][5] = 34,
+ [0][1][2][1][RTW89_UK][5] = 34,
+ [0][1][2][1][RTW89_FCC][6] = 74,
+ [0][1][2][1][RTW89_ETSI][6] = 34,
+ [0][1][2][1][RTW89_MKK][6] = 66,
+ [0][1][2][1][RTW89_IC][6] = 74,
+ [0][1][2][1][RTW89_KCC][6] = 62,
+ [0][1][2][1][RTW89_ACMA][6] = 34,
+ [0][1][2][1][RTW89_CHILE][6] = 40,
+ [0][1][2][1][RTW89_UKRAINE][6] = 34,
+ [0][1][2][1][RTW89_MEXICO][6] = 74,
+ [0][1][2][1][RTW89_CN][6] = 34,
+ [0][1][2][1][RTW89_QATAR][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 34,
+ [0][1][2][1][RTW89_FCC][7] = 66,
+ [0][1][2][1][RTW89_ETSI][7] = 34,
+ [0][1][2][1][RTW89_MKK][7] = 66,
+ [0][1][2][1][RTW89_IC][7] = 66,
+ [0][1][2][1][RTW89_KCC][7] = 62,
+ [0][1][2][1][RTW89_ACMA][7] = 34,
+ [0][1][2][1][RTW89_CHILE][7] = 40,
+ [0][1][2][1][RTW89_UKRAINE][7] = 34,
+ [0][1][2][1][RTW89_MEXICO][7] = 66,
+ [0][1][2][1][RTW89_CN][7] = 34,
+ [0][1][2][1][RTW89_QATAR][7] = 34,
+ [0][1][2][1][RTW89_UK][7] = 34,
+ [0][1][2][1][RTW89_FCC][8] = 62,
+ [0][1][2][1][RTW89_ETSI][8] = 34,
+ [0][1][2][1][RTW89_MKK][8] = 66,
+ [0][1][2][1][RTW89_IC][8] = 62,
+ [0][1][2][1][RTW89_KCC][8] = 62,
+ [0][1][2][1][RTW89_ACMA][8] = 34,
+ [0][1][2][1][RTW89_CHILE][8] = 40,
+ [0][1][2][1][RTW89_UKRAINE][8] = 34,
+ [0][1][2][1][RTW89_MEXICO][8] = 62,
+ [0][1][2][1][RTW89_CN][8] = 34,
+ [0][1][2][1][RTW89_QATAR][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 34,
+ [0][1][2][1][RTW89_FCC][9] = 58,
+ [0][1][2][1][RTW89_ETSI][9] = 34,
+ [0][1][2][1][RTW89_MKK][9] = 66,
+ [0][1][2][1][RTW89_IC][9] = 58,
+ [0][1][2][1][RTW89_KCC][9] = 60,
+ [0][1][2][1][RTW89_ACMA][9] = 34,
+ [0][1][2][1][RTW89_CHILE][9] = 40,
+ [0][1][2][1][RTW89_UKRAINE][9] = 34,
+ [0][1][2][1][RTW89_MEXICO][9] = 58,
+ [0][1][2][1][RTW89_CN][9] = 34,
+ [0][1][2][1][RTW89_QATAR][9] = 34,
+ [0][1][2][1][RTW89_UK][9] = 34,
+ [0][1][2][1][RTW89_FCC][10] = 58,
+ [0][1][2][1][RTW89_ETSI][10] = 34,
+ [0][1][2][1][RTW89_MKK][10] = 66,
+ [0][1][2][1][RTW89_IC][10] = 58,
+ [0][1][2][1][RTW89_KCC][10] = 60,
+ [0][1][2][1][RTW89_ACMA][10] = 34,
+ [0][1][2][1][RTW89_CHILE][10] = 40,
+ [0][1][2][1][RTW89_UKRAINE][10] = 34,
+ [0][1][2][1][RTW89_MEXICO][10] = 58,
+ [0][1][2][1][RTW89_CN][10] = 34,
+ [0][1][2][1][RTW89_QATAR][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 34,
+ [0][1][2][1][RTW89_FCC][11] = 46,
+ [0][1][2][1][RTW89_ETSI][11] = 34,
+ [0][1][2][1][RTW89_MKK][11] = 66,
+ [0][1][2][1][RTW89_IC][11] = 46,
+ [0][1][2][1][RTW89_KCC][11] = 60,
+ [0][1][2][1][RTW89_ACMA][11] = 34,
+ [0][1][2][1][RTW89_CHILE][11] = 40,
+ [0][1][2][1][RTW89_UKRAINE][11] = 34,
+ [0][1][2][1][RTW89_MEXICO][11] = 46,
+ [0][1][2][1][RTW89_CN][11] = 34,
+ [0][1][2][1][RTW89_QATAR][11] = 34,
+ [0][1][2][1][RTW89_UK][11] = 34,
+ [0][1][2][1][RTW89_FCC][12] = 42,
+ [0][1][2][1][RTW89_ETSI][12] = 34,
+ [0][1][2][1][RTW89_MKK][12] = 66,
+ [0][1][2][1][RTW89_IC][12] = 42,
+ [0][1][2][1][RTW89_KCC][12] = 60,
+ [0][1][2][1][RTW89_ACMA][12] = 34,
+ [0][1][2][1][RTW89_CHILE][12] = 40,
+ [0][1][2][1][RTW89_UKRAINE][12] = 34,
+ [0][1][2][1][RTW89_MEXICO][12] = 42,
+ [0][1][2][1][RTW89_CN][12] = 34,
+ [0][1][2][1][RTW89_QATAR][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 34,
+ [0][1][2][1][RTW89_FCC][13] = 127,
+ [0][1][2][1][RTW89_ETSI][13] = 127,
+ [0][1][2][1][RTW89_MKK][13] = 127,
+ [0][1][2][1][RTW89_IC][13] = 127,
+ [0][1][2][1][RTW89_KCC][13] = 127,
+ [0][1][2][1][RTW89_ACMA][13] = 127,
+ [0][1][2][1][RTW89_CHILE][13] = 127,
+ [0][1][2][1][RTW89_UKRAINE][13] = 127,
+ [0][1][2][1][RTW89_MEXICO][13] = 127,
+ [0][1][2][1][RTW89_CN][13] = 127,
+ [0][1][2][1][RTW89_QATAR][13] = 127,
+ [0][1][2][1][RTW89_UK][13] = 127,
+ [1][0][2][0][RTW89_FCC][0] = 127,
+ [1][0][2][0][RTW89_ETSI][0] = 127,
+ [1][0][2][0][RTW89_MKK][0] = 127,
+ [1][0][2][0][RTW89_IC][0] = 127,
+ [1][0][2][0][RTW89_KCC][0] = 127,
+ [1][0][2][0][RTW89_ACMA][0] = 127,
+ [1][0][2][0][RTW89_CHILE][0] = 127,
+ [1][0][2][0][RTW89_UKRAINE][0] = 127,
+ [1][0][2][0][RTW89_MEXICO][0] = 127,
+ [1][0][2][0][RTW89_CN][0] = 127,
+ [1][0][2][0][RTW89_QATAR][0] = 127,
+ [1][0][2][0][RTW89_UK][0] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 127,
+ [1][0][2][0][RTW89_ETSI][1] = 127,
+ [1][0][2][0][RTW89_MKK][1] = 127,
+ [1][0][2][0][RTW89_IC][1] = 127,
+ [1][0][2][0][RTW89_KCC][1] = 127,
+ [1][0][2][0][RTW89_ACMA][1] = 127,
+ [1][0][2][0][RTW89_CHILE][1] = 127,
+ [1][0][2][0][RTW89_UKRAINE][1] = 127,
+ [1][0][2][0][RTW89_MEXICO][1] = 127,
+ [1][0][2][0][RTW89_CN][1] = 127,
+ [1][0][2][0][RTW89_QATAR][1] = 127,
+ [1][0][2][0][RTW89_UK][1] = 127,
+ [1][0][2][0][RTW89_FCC][2] = 70,
+ [1][0][2][0][RTW89_ETSI][2] = 58,
+ [1][0][2][0][RTW89_MKK][2] = 74,
+ [1][0][2][0][RTW89_IC][2] = 70,
+ [1][0][2][0][RTW89_KCC][2] = 74,
+ [1][0][2][0][RTW89_ACMA][2] = 58,
+ [1][0][2][0][RTW89_CHILE][2] = 66,
+ [1][0][2][0][RTW89_UKRAINE][2] = 58,
+ [1][0][2][0][RTW89_MEXICO][2] = 70,
+ [1][0][2][0][RTW89_CN][2] = 58,
+ [1][0][2][0][RTW89_QATAR][2] = 58,
+ [1][0][2][0][RTW89_UK][2] = 58,
+ [1][0][2][0][RTW89_FCC][3] = 70,
+ [1][0][2][0][RTW89_ETSI][3] = 58,
+ [1][0][2][0][RTW89_MKK][3] = 74,
+ [1][0][2][0][RTW89_IC][3] = 70,
+ [1][0][2][0][RTW89_KCC][3] = 74,
+ [1][0][2][0][RTW89_ACMA][3] = 58,
+ [1][0][2][0][RTW89_CHILE][3] = 66,
+ [1][0][2][0][RTW89_UKRAINE][3] = 58,
+ [1][0][2][0][RTW89_MEXICO][3] = 70,
+ [1][0][2][0][RTW89_CN][3] = 58,
+ [1][0][2][0][RTW89_QATAR][3] = 58,
+ [1][0][2][0][RTW89_UK][3] = 58,
+ [1][0][2][0][RTW89_FCC][4] = 72,
+ [1][0][2][0][RTW89_ETSI][4] = 58,
+ [1][0][2][0][RTW89_MKK][4] = 74,
+ [1][0][2][0][RTW89_IC][4] = 72,
+ [1][0][2][0][RTW89_KCC][4] = 74,
+ [1][0][2][0][RTW89_ACMA][4] = 58,
+ [1][0][2][0][RTW89_CHILE][4] = 66,
+ [1][0][2][0][RTW89_UKRAINE][4] = 58,
+ [1][0][2][0][RTW89_MEXICO][4] = 72,
+ [1][0][2][0][RTW89_CN][4] = 58,
+ [1][0][2][0][RTW89_QATAR][4] = 58,
+ [1][0][2][0][RTW89_UK][4] = 58,
+ [1][0][2][0][RTW89_FCC][5] = 72,
+ [1][0][2][0][RTW89_ETSI][5] = 58,
+ [1][0][2][0][RTW89_MKK][5] = 74,
+ [1][0][2][0][RTW89_IC][5] = 72,
+ [1][0][2][0][RTW89_KCC][5] = 74,
+ [1][0][2][0][RTW89_ACMA][5] = 58,
+ [1][0][2][0][RTW89_CHILE][5] = 66,
+ [1][0][2][0][RTW89_UKRAINE][5] = 58,
+ [1][0][2][0][RTW89_MEXICO][5] = 72,
+ [1][0][2][0][RTW89_CN][5] = 58,
+ [1][0][2][0][RTW89_QATAR][5] = 58,
+ [1][0][2][0][RTW89_UK][5] = 58,
+ [1][0][2][0][RTW89_FCC][6] = 72,
+ [1][0][2][0][RTW89_ETSI][6] = 58,
+ [1][0][2][0][RTW89_MKK][6] = 74,
+ [1][0][2][0][RTW89_IC][6] = 72,
+ [1][0][2][0][RTW89_KCC][6] = 74,
+ [1][0][2][0][RTW89_ACMA][6] = 58,
+ [1][0][2][0][RTW89_CHILE][6] = 66,
+ [1][0][2][0][RTW89_UKRAINE][6] = 58,
+ [1][0][2][0][RTW89_MEXICO][6] = 72,
+ [1][0][2][0][RTW89_CN][6] = 58,
+ [1][0][2][0][RTW89_QATAR][6] = 58,
+ [1][0][2][0][RTW89_UK][6] = 58,
+ [1][0][2][0][RTW89_FCC][7] = 68,
+ [1][0][2][0][RTW89_ETSI][7] = 58,
+ [1][0][2][0][RTW89_MKK][7] = 74,
+ [1][0][2][0][RTW89_IC][7] = 68,
+ [1][0][2][0][RTW89_KCC][7] = 74,
+ [1][0][2][0][RTW89_ACMA][7] = 58,
+ [1][0][2][0][RTW89_CHILE][7] = 66,
+ [1][0][2][0][RTW89_UKRAINE][7] = 58,
+ [1][0][2][0][RTW89_MEXICO][7] = 68,
+ [1][0][2][0][RTW89_CN][7] = 58,
+ [1][0][2][0][RTW89_QATAR][7] = 58,
+ [1][0][2][0][RTW89_UK][7] = 58,
+ [1][0][2][0][RTW89_FCC][8] = 68,
+ [1][0][2][0][RTW89_ETSI][8] = 58,
+ [1][0][2][0][RTW89_MKK][8] = 74,
+ [1][0][2][0][RTW89_IC][8] = 68,
+ [1][0][2][0][RTW89_KCC][8] = 74,
+ [1][0][2][0][RTW89_ACMA][8] = 58,
+ [1][0][2][0][RTW89_CHILE][8] = 66,
+ [1][0][2][0][RTW89_UKRAINE][8] = 58,
+ [1][0][2][0][RTW89_MEXICO][8] = 68,
+ [1][0][2][0][RTW89_CN][8] = 58,
+ [1][0][2][0][RTW89_QATAR][8] = 58,
+ [1][0][2][0][RTW89_UK][8] = 58,
+ [1][0][2][0][RTW89_FCC][9] = 68,
+ [1][0][2][0][RTW89_ETSI][9] = 58,
+ [1][0][2][0][RTW89_MKK][9] = 74,
+ [1][0][2][0][RTW89_IC][9] = 68,
+ [1][0][2][0][RTW89_KCC][9] = 74,
+ [1][0][2][0][RTW89_ACMA][9] = 58,
+ [1][0][2][0][RTW89_CHILE][9] = 66,
+ [1][0][2][0][RTW89_UKRAINE][9] = 58,
+ [1][0][2][0][RTW89_MEXICO][9] = 68,
+ [1][0][2][0][RTW89_CN][9] = 58,
+ [1][0][2][0][RTW89_QATAR][9] = 58,
+ [1][0][2][0][RTW89_UK][9] = 58,
+ [1][0][2][0][RTW89_FCC][10] = 66,
+ [1][0][2][0][RTW89_ETSI][10] = 58,
+ [1][0][2][0][RTW89_MKK][10] = 74,
+ [1][0][2][0][RTW89_IC][10] = 66,
+ [1][0][2][0][RTW89_KCC][10] = 74,
+ [1][0][2][0][RTW89_ACMA][10] = 58,
+ [1][0][2][0][RTW89_CHILE][10] = 66,
+ [1][0][2][0][RTW89_UKRAINE][10] = 58,
+ [1][0][2][0][RTW89_MEXICO][10] = 66,
+ [1][0][2][0][RTW89_CN][10] = 58,
+ [1][0][2][0][RTW89_QATAR][10] = 58,
+ [1][0][2][0][RTW89_UK][10] = 58,
+ [1][0][2][0][RTW89_FCC][11] = 127,
+ [1][0][2][0][RTW89_ETSI][11] = 127,
+ [1][0][2][0][RTW89_MKK][11] = 127,
+ [1][0][2][0][RTW89_IC][11] = 127,
+ [1][0][2][0][RTW89_KCC][11] = 127,
+ [1][0][2][0][RTW89_ACMA][11] = 127,
+ [1][0][2][0][RTW89_CHILE][11] = 127,
+ [1][0][2][0][RTW89_UKRAINE][11] = 127,
+ [1][0][2][0][RTW89_MEXICO][11] = 127,
+ [1][0][2][0][RTW89_CN][11] = 127,
+ [1][0][2][0][RTW89_QATAR][11] = 127,
+ [1][0][2][0][RTW89_UK][11] = 127,
+ [1][0][2][0][RTW89_FCC][12] = 127,
+ [1][0][2][0][RTW89_ETSI][12] = 127,
+ [1][0][2][0][RTW89_MKK][12] = 127,
+ [1][0][2][0][RTW89_IC][12] = 127,
+ [1][0][2][0][RTW89_KCC][12] = 127,
+ [1][0][2][0][RTW89_ACMA][12] = 127,
+ [1][0][2][0][RTW89_CHILE][12] = 127,
+ [1][0][2][0][RTW89_UKRAINE][12] = 127,
+ [1][0][2][0][RTW89_MEXICO][12] = 127,
+ [1][0][2][0][RTW89_CN][12] = 127,
+ [1][0][2][0][RTW89_QATAR][12] = 127,
+ [1][0][2][0][RTW89_UK][12] = 127,
+ [1][0][2][0][RTW89_FCC][13] = 127,
+ [1][0][2][0][RTW89_ETSI][13] = 127,
+ [1][0][2][0][RTW89_MKK][13] = 127,
+ [1][0][2][0][RTW89_IC][13] = 127,
+ [1][0][2][0][RTW89_KCC][13] = 127,
+ [1][0][2][0][RTW89_ACMA][13] = 127,
+ [1][0][2][0][RTW89_CHILE][13] = 127,
+ [1][0][2][0][RTW89_UKRAINE][13] = 127,
+ [1][0][2][0][RTW89_MEXICO][13] = 127,
+ [1][0][2][0][RTW89_CN][13] = 127,
+ [1][0][2][0][RTW89_QATAR][13] = 127,
+ [1][0][2][0][RTW89_UK][13] = 127,
+ [1][1][2][0][RTW89_FCC][0] = 127,
+ [1][1][2][0][RTW89_ETSI][0] = 127,
+ [1][1][2][0][RTW89_MKK][0] = 127,
+ [1][1][2][0][RTW89_IC][0] = 127,
+ [1][1][2][0][RTW89_KCC][0] = 127,
+ [1][1][2][0][RTW89_ACMA][0] = 127,
+ [1][1][2][0][RTW89_CHILE][0] = 127,
+ [1][1][2][0][RTW89_UKRAINE][0] = 127,
+ [1][1][2][0][RTW89_MEXICO][0] = 127,
+ [1][1][2][0][RTW89_CN][0] = 127,
+ [1][1][2][0][RTW89_QATAR][0] = 127,
+ [1][1][2][0][RTW89_UK][0] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 127,
+ [1][1][2][0][RTW89_ETSI][1] = 127,
+ [1][1][2][0][RTW89_MKK][1] = 127,
+ [1][1][2][0][RTW89_IC][1] = 127,
+ [1][1][2][0][RTW89_KCC][1] = 127,
+ [1][1][2][0][RTW89_ACMA][1] = 127,
+ [1][1][2][0][RTW89_CHILE][1] = 127,
+ [1][1][2][0][RTW89_UKRAINE][1] = 127,
+ [1][1][2][0][RTW89_MEXICO][1] = 127,
+ [1][1][2][0][RTW89_CN][1] = 127,
+ [1][1][2][0][RTW89_QATAR][1] = 127,
+ [1][1][2][0][RTW89_UK][1] = 127,
+ [1][1][2][0][RTW89_FCC][2] = 54,
+ [1][1][2][0][RTW89_ETSI][2] = 46,
+ [1][1][2][0][RTW89_MKK][2] = 66,
+ [1][1][2][0][RTW89_IC][2] = 54,
+ [1][1][2][0][RTW89_KCC][2] = 62,
+ [1][1][2][0][RTW89_ACMA][2] = 46,
+ [1][1][2][0][RTW89_CHILE][2] = 52,
+ [1][1][2][0][RTW89_UKRAINE][2] = 46,
+ [1][1][2][0][RTW89_MEXICO][2] = 54,
+ [1][1][2][0][RTW89_CN][2] = 46,
+ [1][1][2][0][RTW89_QATAR][2] = 46,
+ [1][1][2][0][RTW89_UK][2] = 46,
+ [1][1][2][0][RTW89_FCC][3] = 54,
+ [1][1][2][0][RTW89_ETSI][3] = 46,
+ [1][1][2][0][RTW89_MKK][3] = 66,
+ [1][1][2][0][RTW89_IC][3] = 54,
+ [1][1][2][0][RTW89_KCC][3] = 62,
+ [1][1][2][0][RTW89_ACMA][3] = 46,
+ [1][1][2][0][RTW89_CHILE][3] = 52,
+ [1][1][2][0][RTW89_UKRAINE][3] = 46,
+ [1][1][2][0][RTW89_MEXICO][3] = 54,
+ [1][1][2][0][RTW89_CN][3] = 46,
+ [1][1][2][0][RTW89_QATAR][3] = 46,
+ [1][1][2][0][RTW89_UK][3] = 46,
+ [1][1][2][0][RTW89_FCC][4] = 58,
+ [1][1][2][0][RTW89_ETSI][4] = 46,
+ [1][1][2][0][RTW89_MKK][4] = 66,
+ [1][1][2][0][RTW89_IC][4] = 58,
+ [1][1][2][0][RTW89_KCC][4] = 62,
+ [1][1][2][0][RTW89_ACMA][4] = 46,
+ [1][1][2][0][RTW89_CHILE][4] = 52,
+ [1][1][2][0][RTW89_UKRAINE][4] = 46,
+ [1][1][2][0][RTW89_MEXICO][4] = 58,
+ [1][1][2][0][RTW89_CN][4] = 46,
+ [1][1][2][0][RTW89_QATAR][4] = 46,
+ [1][1][2][0][RTW89_UK][4] = 46,
+ [1][1][2][0][RTW89_FCC][5] = 66,
+ [1][1][2][0][RTW89_ETSI][5] = 46,
+ [1][1][2][0][RTW89_MKK][5] = 66,
+ [1][1][2][0][RTW89_IC][5] = 66,
+ [1][1][2][0][RTW89_KCC][5] = 62,
+ [1][1][2][0][RTW89_ACMA][5] = 46,
+ [1][1][2][0][RTW89_CHILE][5] = 54,
+ [1][1][2][0][RTW89_UKRAINE][5] = 46,
+ [1][1][2][0][RTW89_MEXICO][5] = 66,
+ [1][1][2][0][RTW89_CN][5] = 46,
+ [1][1][2][0][RTW89_QATAR][5] = 46,
+ [1][1][2][0][RTW89_UK][5] = 46,
+ [1][1][2][0][RTW89_FCC][6] = 58,
+ [1][1][2][0][RTW89_ETSI][6] = 46,
+ [1][1][2][0][RTW89_MKK][6] = 66,
+ [1][1][2][0][RTW89_IC][6] = 58,
+ [1][1][2][0][RTW89_KCC][6] = 62,
+ [1][1][2][0][RTW89_ACMA][6] = 46,
+ [1][1][2][0][RTW89_CHILE][6] = 52,
+ [1][1][2][0][RTW89_UKRAINE][6] = 46,
+ [1][1][2][0][RTW89_MEXICO][6] = 58,
+ [1][1][2][0][RTW89_CN][6] = 46,
+ [1][1][2][0][RTW89_QATAR][6] = 46,
+ [1][1][2][0][RTW89_UK][6] = 46,
+ [1][1][2][0][RTW89_FCC][7] = 54,
+ [1][1][2][0][RTW89_ETSI][7] = 46,
+ [1][1][2][0][RTW89_MKK][7] = 66,
+ [1][1][2][0][RTW89_IC][7] = 54,
+ [1][1][2][0][RTW89_KCC][7] = 62,
+ [1][1][2][0][RTW89_ACMA][7] = 46,
+ [1][1][2][0][RTW89_CHILE][7] = 52,
+ [1][1][2][0][RTW89_UKRAINE][7] = 46,
+ [1][1][2][0][RTW89_MEXICO][7] = 54,
+ [1][1][2][0][RTW89_CN][7] = 46,
+ [1][1][2][0][RTW89_QATAR][7] = 46,
+ [1][1][2][0][RTW89_UK][7] = 46,
+ [1][1][2][0][RTW89_FCC][8] = 54,
+ [1][1][2][0][RTW89_ETSI][8] = 46,
+ [1][1][2][0][RTW89_MKK][8] = 66,
+ [1][1][2][0][RTW89_IC][8] = 54,
+ [1][1][2][0][RTW89_KCC][8] = 62,
+ [1][1][2][0][RTW89_ACMA][8] = 46,
+ [1][1][2][0][RTW89_CHILE][8] = 52,
+ [1][1][2][0][RTW89_UKRAINE][8] = 46,
+ [1][1][2][0][RTW89_MEXICO][8] = 54,
+ [1][1][2][0][RTW89_CN][8] = 46,
+ [1][1][2][0][RTW89_QATAR][8] = 46,
+ [1][1][2][0][RTW89_UK][8] = 46,
+ [1][1][2][0][RTW89_FCC][9] = 42,
+ [1][1][2][0][RTW89_ETSI][9] = 46,
+ [1][1][2][0][RTW89_MKK][9] = 66,
+ [1][1][2][0][RTW89_IC][9] = 42,
+ [1][1][2][0][RTW89_KCC][9] = 62,
+ [1][1][2][0][RTW89_ACMA][9] = 46,
+ [1][1][2][0][RTW89_CHILE][9] = 42,
+ [1][1][2][0][RTW89_UKRAINE][9] = 46,
+ [1][1][2][0][RTW89_MEXICO][9] = 42,
+ [1][1][2][0][RTW89_CN][9] = 46,
+ [1][1][2][0][RTW89_QATAR][9] = 46,
+ [1][1][2][0][RTW89_UK][9] = 46,
+ [1][1][2][0][RTW89_FCC][10] = 38,
+ [1][1][2][0][RTW89_ETSI][10] = 46,
+ [1][1][2][0][RTW89_MKK][10] = 66,
+ [1][1][2][0][RTW89_IC][10] = 38,
+ [1][1][2][0][RTW89_KCC][10] = 62,
+ [1][1][2][0][RTW89_ACMA][10] = 46,
+ [1][1][2][0][RTW89_CHILE][10] = 38,
+ [1][1][2][0][RTW89_UKRAINE][10] = 46,
+ [1][1][2][0][RTW89_MEXICO][10] = 38,
+ [1][1][2][0][RTW89_CN][10] = 46,
+ [1][1][2][0][RTW89_QATAR][10] = 46,
+ [1][1][2][0][RTW89_UK][10] = 46,
+ [1][1][2][0][RTW89_FCC][11] = 127,
+ [1][1][2][0][RTW89_ETSI][11] = 127,
+ [1][1][2][0][RTW89_MKK][11] = 127,
+ [1][1][2][0][RTW89_IC][11] = 127,
+ [1][1][2][0][RTW89_KCC][11] = 127,
+ [1][1][2][0][RTW89_ACMA][11] = 127,
+ [1][1][2][0][RTW89_CHILE][11] = 127,
+ [1][1][2][0][RTW89_UKRAINE][11] = 127,
+ [1][1][2][0][RTW89_MEXICO][11] = 127,
+ [1][1][2][0][RTW89_CN][11] = 127,
+ [1][1][2][0][RTW89_QATAR][11] = 127,
+ [1][1][2][0][RTW89_UK][11] = 127,
+ [1][1][2][0][RTW89_FCC][12] = 127,
+ [1][1][2][0][RTW89_ETSI][12] = 127,
+ [1][1][2][0][RTW89_MKK][12] = 127,
+ [1][1][2][0][RTW89_IC][12] = 127,
+ [1][1][2][0][RTW89_KCC][12] = 127,
+ [1][1][2][0][RTW89_ACMA][12] = 127,
+ [1][1][2][0][RTW89_CHILE][12] = 127,
+ [1][1][2][0][RTW89_UKRAINE][12] = 127,
+ [1][1][2][0][RTW89_MEXICO][12] = 127,
+ [1][1][2][0][RTW89_CN][12] = 127,
+ [1][1][2][0][RTW89_QATAR][12] = 127,
+ [1][1][2][0][RTW89_UK][12] = 127,
+ [1][1][2][0][RTW89_FCC][13] = 127,
+ [1][1][2][0][RTW89_ETSI][13] = 127,
+ [1][1][2][0][RTW89_MKK][13] = 127,
+ [1][1][2][0][RTW89_IC][13] = 127,
+ [1][1][2][0][RTW89_KCC][13] = 127,
+ [1][1][2][0][RTW89_ACMA][13] = 127,
+ [1][1][2][0][RTW89_CHILE][13] = 127,
+ [1][1][2][0][RTW89_UKRAINE][13] = 127,
+ [1][1][2][0][RTW89_MEXICO][13] = 127,
+ [1][1][2][0][RTW89_CN][13] = 127,
+ [1][1][2][0][RTW89_QATAR][13] = 127,
+ [1][1][2][0][RTW89_UK][13] = 127,
+ [1][1][2][1][RTW89_FCC][0] = 127,
+ [1][1][2][1][RTW89_ETSI][0] = 127,
+ [1][1][2][1][RTW89_MKK][0] = 127,
+ [1][1][2][1][RTW89_IC][0] = 127,
+ [1][1][2][1][RTW89_KCC][0] = 127,
+ [1][1][2][1][RTW89_ACMA][0] = 127,
+ [1][1][2][1][RTW89_CHILE][0] = 127,
+ [1][1][2][1][RTW89_UKRAINE][0] = 127,
+ [1][1][2][1][RTW89_MEXICO][0] = 127,
+ [1][1][2][1][RTW89_CN][0] = 127,
+ [1][1][2][1][RTW89_QATAR][0] = 127,
+ [1][1][2][1][RTW89_UK][0] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 127,
+ [1][1][2][1][RTW89_ETSI][1] = 127,
+ [1][1][2][1][RTW89_MKK][1] = 127,
+ [1][1][2][1][RTW89_IC][1] = 127,
+ [1][1][2][1][RTW89_KCC][1] = 127,
+ [1][1][2][1][RTW89_ACMA][1] = 127,
+ [1][1][2][1][RTW89_CHILE][1] = 127,
+ [1][1][2][1][RTW89_UKRAINE][1] = 127,
+ [1][1][2][1][RTW89_MEXICO][1] = 127,
+ [1][1][2][1][RTW89_CN][1] = 127,
+ [1][1][2][1][RTW89_QATAR][1] = 127,
+ [1][1][2][1][RTW89_UK][1] = 127,
+ [1][1][2][1][RTW89_FCC][2] = 54,
+ [1][1][2][1][RTW89_ETSI][2] = 34,
+ [1][1][2][1][RTW89_MKK][2] = 66,
+ [1][1][2][1][RTW89_IC][2] = 54,
+ [1][1][2][1][RTW89_KCC][2] = 62,
+ [1][1][2][1][RTW89_ACMA][2] = 34,
+ [1][1][2][1][RTW89_CHILE][2] = 42,
+ [1][1][2][1][RTW89_UKRAINE][2] = 34,
+ [1][1][2][1][RTW89_MEXICO][2] = 54,
+ [1][1][2][1][RTW89_CN][2] = 34,
+ [1][1][2][1][RTW89_QATAR][2] = 34,
+ [1][1][2][1][RTW89_UK][2] = 34,
+ [1][1][2][1][RTW89_FCC][3] = 54,
+ [1][1][2][1][RTW89_ETSI][3] = 34,
+ [1][1][2][1][RTW89_MKK][3] = 66,
+ [1][1][2][1][RTW89_IC][3] = 54,
+ [1][1][2][1][RTW89_KCC][3] = 62,
+ [1][1][2][1][RTW89_ACMA][3] = 34,
+ [1][1][2][1][RTW89_CHILE][3] = 42,
+ [1][1][2][1][RTW89_UKRAINE][3] = 34,
+ [1][1][2][1][RTW89_MEXICO][3] = 54,
+ [1][1][2][1][RTW89_CN][3] = 34,
+ [1][1][2][1][RTW89_QATAR][3] = 34,
+ [1][1][2][1][RTW89_UK][3] = 34,
+ [1][1][2][1][RTW89_FCC][4] = 58,
+ [1][1][2][1][RTW89_ETSI][4] = 34,
+ [1][1][2][1][RTW89_MKK][4] = 66,
+ [1][1][2][1][RTW89_IC][4] = 58,
+ [1][1][2][1][RTW89_KCC][4] = 62,
+ [1][1][2][1][RTW89_ACMA][4] = 34,
+ [1][1][2][1][RTW89_CHILE][4] = 42,
+ [1][1][2][1][RTW89_UKRAINE][4] = 34,
+ [1][1][2][1][RTW89_MEXICO][4] = 58,
+ [1][1][2][1][RTW89_CN][4] = 34,
+ [1][1][2][1][RTW89_QATAR][4] = 34,
+ [1][1][2][1][RTW89_UK][4] = 34,
+ [1][1][2][1][RTW89_FCC][5] = 66,
+ [1][1][2][1][RTW89_ETSI][5] = 34,
+ [1][1][2][1][RTW89_MKK][5] = 66,
+ [1][1][2][1][RTW89_IC][5] = 66,
+ [1][1][2][1][RTW89_KCC][5] = 62,
+ [1][1][2][1][RTW89_ACMA][5] = 34,
+ [1][1][2][1][RTW89_CHILE][5] = 42,
+ [1][1][2][1][RTW89_UKRAINE][5] = 34,
+ [1][1][2][1][RTW89_MEXICO][5] = 66,
+ [1][1][2][1][RTW89_CN][5] = 34,
+ [1][1][2][1][RTW89_QATAR][5] = 34,
+ [1][1][2][1][RTW89_UK][5] = 34,
+ [1][1][2][1][RTW89_FCC][6] = 58,
+ [1][1][2][1][RTW89_ETSI][6] = 34,
+ [1][1][2][1][RTW89_MKK][6] = 66,
+ [1][1][2][1][RTW89_IC][6] = 58,
+ [1][1][2][1][RTW89_KCC][6] = 62,
+ [1][1][2][1][RTW89_ACMA][6] = 34,
+ [1][1][2][1][RTW89_CHILE][6] = 42,
+ [1][1][2][1][RTW89_UKRAINE][6] = 34,
+ [1][1][2][1][RTW89_MEXICO][6] = 58,
+ [1][1][2][1][RTW89_CN][6] = 34,
+ [1][1][2][1][RTW89_QATAR][6] = 34,
+ [1][1][2][1][RTW89_UK][6] = 34,
+ [1][1][2][1][RTW89_FCC][7] = 54,
+ [1][1][2][1][RTW89_ETSI][7] = 34,
+ [1][1][2][1][RTW89_MKK][7] = 66,
+ [1][1][2][1][RTW89_IC][7] = 54,
+ [1][1][2][1][RTW89_KCC][7] = 62,
+ [1][1][2][1][RTW89_ACMA][7] = 34,
+ [1][1][2][1][RTW89_CHILE][7] = 42,
+ [1][1][2][1][RTW89_UKRAINE][7] = 34,
+ [1][1][2][1][RTW89_MEXICO][7] = 54,
+ [1][1][2][1][RTW89_CN][7] = 34,
+ [1][1][2][1][RTW89_QATAR][7] = 34,
+ [1][1][2][1][RTW89_UK][7] = 34,
+ [1][1][2][1][RTW89_FCC][8] = 54,
+ [1][1][2][1][RTW89_ETSI][8] = 34,
+ [1][1][2][1][RTW89_MKK][8] = 66,
+ [1][1][2][1][RTW89_IC][8] = 54,
+ [1][1][2][1][RTW89_KCC][8] = 62,
+ [1][1][2][1][RTW89_ACMA][8] = 34,
+ [1][1][2][1][RTW89_CHILE][8] = 42,
+ [1][1][2][1][RTW89_UKRAINE][8] = 34,
+ [1][1][2][1][RTW89_MEXICO][8] = 54,
+ [1][1][2][1][RTW89_CN][8] = 34,
+ [1][1][2][1][RTW89_QATAR][8] = 34,
+ [1][1][2][1][RTW89_UK][8] = 34,
+ [1][1][2][1][RTW89_FCC][9] = 42,
+ [1][1][2][1][RTW89_ETSI][9] = 34,
+ [1][1][2][1][RTW89_MKK][9] = 66,
+ [1][1][2][1][RTW89_IC][9] = 42,
+ [1][1][2][1][RTW89_KCC][9] = 62,
+ [1][1][2][1][RTW89_ACMA][9] = 34,
+ [1][1][2][1][RTW89_CHILE][9] = 42,
+ [1][1][2][1][RTW89_UKRAINE][9] = 34,
+ [1][1][2][1][RTW89_MEXICO][9] = 42,
+ [1][1][2][1][RTW89_CN][9] = 34,
+ [1][1][2][1][RTW89_QATAR][9] = 34,
+ [1][1][2][1][RTW89_UK][9] = 34,
+ [1][1][2][1][RTW89_FCC][10] = 38,
+ [1][1][2][1][RTW89_ETSI][10] = 34,
+ [1][1][2][1][RTW89_MKK][10] = 66,
+ [1][1][2][1][RTW89_IC][10] = 38,
+ [1][1][2][1][RTW89_KCC][10] = 62,
+ [1][1][2][1][RTW89_ACMA][10] = 34,
+ [1][1][2][1][RTW89_CHILE][10] = 38,
+ [1][1][2][1][RTW89_UKRAINE][10] = 34,
+ [1][1][2][1][RTW89_MEXICO][10] = 38,
+ [1][1][2][1][RTW89_CN][10] = 34,
+ [1][1][2][1][RTW89_QATAR][10] = 34,
+ [1][1][2][1][RTW89_UK][10] = 34,
+ [1][1][2][1][RTW89_FCC][11] = 127,
+ [1][1][2][1][RTW89_ETSI][11] = 127,
+ [1][1][2][1][RTW89_MKK][11] = 127,
+ [1][1][2][1][RTW89_IC][11] = 127,
+ [1][1][2][1][RTW89_KCC][11] = 127,
+ [1][1][2][1][RTW89_ACMA][11] = 127,
+ [1][1][2][1][RTW89_CHILE][11] = 127,
+ [1][1][2][1][RTW89_UKRAINE][11] = 127,
+ [1][1][2][1][RTW89_MEXICO][11] = 127,
+ [1][1][2][1][RTW89_CN][11] = 127,
+ [1][1][2][1][RTW89_QATAR][11] = 127,
+ [1][1][2][1][RTW89_UK][11] = 127,
+ [1][1][2][1][RTW89_FCC][12] = 127,
+ [1][1][2][1][RTW89_ETSI][12] = 127,
+ [1][1][2][1][RTW89_MKK][12] = 127,
+ [1][1][2][1][RTW89_IC][12] = 127,
+ [1][1][2][1][RTW89_KCC][12] = 127,
+ [1][1][2][1][RTW89_ACMA][12] = 127,
+ [1][1][2][1][RTW89_CHILE][12] = 127,
+ [1][1][2][1][RTW89_UKRAINE][12] = 127,
+ [1][1][2][1][RTW89_MEXICO][12] = 127,
+ [1][1][2][1][RTW89_CN][12] = 127,
+ [1][1][2][1][RTW89_QATAR][12] = 127,
+ [1][1][2][1][RTW89_UK][12] = 127,
+ [1][1][2][1][RTW89_FCC][13] = 127,
+ [1][1][2][1][RTW89_ETSI][13] = 127,
+ [1][1][2][1][RTW89_MKK][13] = 127,
+ [1][1][2][1][RTW89_IC][13] = 127,
+ [1][1][2][1][RTW89_KCC][13] = 127,
+ [1][1][2][1][RTW89_ACMA][13] = 127,
+ [1][1][2][1][RTW89_CHILE][13] = 127,
+ [1][1][2][1][RTW89_UKRAINE][13] = 127,
+ [1][1][2][1][RTW89_MEXICO][13] = 127,
+ [1][1][2][1][RTW89_CN][13] = 127,
+ [1][1][2][1][RTW89_QATAR][13] = 127,
+ [1][1][2][1][RTW89_UK][13] = 127,
+};
+
+const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][1][0][RTW89_WW][0] = 42,
+ [0][0][1][0][RTW89_WW][2] = 42,
+ [0][0][1][0][RTW89_WW][4] = 42,
+ [0][0][1][0][RTW89_WW][6] = 42,
+ [0][0][1][0][RTW89_WW][8] = 52,
+ [0][0][1][0][RTW89_WW][10] = 52,
+ [0][0][1][0][RTW89_WW][12] = 52,
+ [0][0][1][0][RTW89_WW][14] = 52,
+ [0][0][1][0][RTW89_WW][15] = 52,
+ [0][0][1][0][RTW89_WW][17] = 52,
+ [0][0][1][0][RTW89_WW][19] = 52,
+ [0][0][1][0][RTW89_WW][21] = 52,
+ [0][0][1][0][RTW89_WW][23] = 52,
+ [0][0][1][0][RTW89_WW][25] = 52,
+ [0][0][1][0][RTW89_WW][27] = 52,
+ [0][0][1][0][RTW89_WW][29] = 52,
+ [0][0][1][0][RTW89_WW][31] = 52,
+ [0][0][1][0][RTW89_WW][33] = 52,
+ [0][0][1][0][RTW89_WW][35] = 52,
+ [0][0][1][0][RTW89_WW][37] = 68,
+ [0][0][1][0][RTW89_WW][38] = 28,
+ [0][0][1][0][RTW89_WW][40] = 28,
+ [0][0][1][0][RTW89_WW][42] = 28,
+ [0][0][1][0][RTW89_WW][44] = 28,
+ [0][0][1][0][RTW89_WW][46] = 28,
+ [0][0][1][0][RTW89_WW][48] = 78,
+ [0][0][1][0][RTW89_WW][50] = 78,
+ [0][0][1][0][RTW89_WW][52] = 78,
+ [0][1][1][0][RTW89_WW][0] = 30,
+ [0][1][1][0][RTW89_WW][2] = 32,
+ [0][1][1][0][RTW89_WW][4] = 30,
+ [0][1][1][0][RTW89_WW][6] = 30,
+ [0][1][1][0][RTW89_WW][8] = 40,
+ [0][1][1][0][RTW89_WW][10] = 40,
+ [0][1][1][0][RTW89_WW][12] = 40,
+ [0][1][1][0][RTW89_WW][14] = 40,
+ [0][1][1][0][RTW89_WW][15] = 40,
+ [0][1][1][0][RTW89_WW][17] = 40,
+ [0][1][1][0][RTW89_WW][19] = 40,
+ [0][1][1][0][RTW89_WW][21] = 40,
+ [0][1][1][0][RTW89_WW][23] = 40,
+ [0][1][1][0][RTW89_WW][25] = 40,
+ [0][1][1][0][RTW89_WW][27] = 40,
+ [0][1][1][0][RTW89_WW][29] = 40,
+ [0][1][1][0][RTW89_WW][31] = 40,
+ [0][1][1][0][RTW89_WW][33] = 40,
+ [0][1][1][0][RTW89_WW][35] = 40,
+ [0][1][1][0][RTW89_WW][37] = 50,
+ [0][1][1][0][RTW89_WW][38] = 16,
+ [0][1][1][0][RTW89_WW][40] = 16,
+ [0][1][1][0][RTW89_WW][42] = 16,
+ [0][1][1][0][RTW89_WW][44] = 16,
+ [0][1][1][0][RTW89_WW][46] = 16,
+ [0][1][1][0][RTW89_WW][48] = 56,
+ [0][1][1][0][RTW89_WW][50] = 56,
+ [0][1][1][0][RTW89_WW][52] = 56,
+ [0][0][2][0][RTW89_WW][0] = 42,
+ [0][0][2][0][RTW89_WW][2] = 42,
+ [0][0][2][0][RTW89_WW][4] = 42,
+ [0][0][2][0][RTW89_WW][6] = 42,
+ [0][0][2][0][RTW89_WW][8] = 52,
+ [0][0][2][0][RTW89_WW][10] = 52,
+ [0][0][2][0][RTW89_WW][12] = 52,
+ [0][0][2][0][RTW89_WW][14] = 52,
+ [0][0][2][0][RTW89_WW][15] = 52,
+ [0][0][2][0][RTW89_WW][17] = 52,
+ [0][0][2][0][RTW89_WW][19] = 52,
+ [0][0][2][0][RTW89_WW][21] = 52,
+ [0][0][2][0][RTW89_WW][23] = 52,
+ [0][0][2][0][RTW89_WW][25] = 52,
+ [0][0][2][0][RTW89_WW][27] = 52,
+ [0][0][2][0][RTW89_WW][29] = 52,
+ [0][0][2][0][RTW89_WW][31] = 52,
+ [0][0][2][0][RTW89_WW][33] = 52,
+ [0][0][2][0][RTW89_WW][35] = 52,
+ [0][0][2][0][RTW89_WW][37] = 64,
+ [0][0][2][0][RTW89_WW][38] = 28,
+ [0][0][2][0][RTW89_WW][40] = 28,
+ [0][0][2][0][RTW89_WW][42] = 28,
+ [0][0][2][0][RTW89_WW][44] = 28,
+ [0][0][2][0][RTW89_WW][46] = 28,
+ [0][0][2][0][RTW89_WW][48] = 78,
+ [0][0][2][0][RTW89_WW][50] = 78,
+ [0][0][2][0][RTW89_WW][52] = 78,
+ [0][1][2][0][RTW89_WW][0] = 30,
+ [0][1][2][0][RTW89_WW][2] = 30,
+ [0][1][2][0][RTW89_WW][4] = 30,
+ [0][1][2][0][RTW89_WW][6] = 30,
+ [0][1][2][0][RTW89_WW][8] = 40,
+ [0][1][2][0][RTW89_WW][10] = 40,
+ [0][1][2][0][RTW89_WW][12] = 40,
+ [0][1][2][0][RTW89_WW][14] = 40,
+ [0][1][2][0][RTW89_WW][15] = 40,
+ [0][1][2][0][RTW89_WW][17] = 40,
+ [0][1][2][0][RTW89_WW][19] = 40,
+ [0][1][2][0][RTW89_WW][21] = 40,
+ [0][1][2][0][RTW89_WW][23] = 40,
+ [0][1][2][0][RTW89_WW][25] = 40,
+ [0][1][2][0][RTW89_WW][27] = 40,
+ [0][1][2][0][RTW89_WW][29] = 40,
+ [0][1][2][0][RTW89_WW][31] = 40,
+ [0][1][2][0][RTW89_WW][33] = 40,
+ [0][1][2][0][RTW89_WW][35] = 40,
+ [0][1][2][0][RTW89_WW][37] = 50,
+ [0][1][2][0][RTW89_WW][38] = 16,
+ [0][1][2][0][RTW89_WW][40] = 16,
+ [0][1][2][0][RTW89_WW][42] = 16,
+ [0][1][2][0][RTW89_WW][44] = 16,
+ [0][1][2][0][RTW89_WW][46] = 16,
+ [0][1][2][0][RTW89_WW][48] = 58,
+ [0][1][2][0][RTW89_WW][50] = 58,
+ [0][1][2][0][RTW89_WW][52] = 58,
+ [0][1][2][1][RTW89_WW][0] = 14,
+ [0][1][2][1][RTW89_WW][2] = 14,
+ [0][1][2][1][RTW89_WW][4] = 14,
+ [0][1][2][1][RTW89_WW][6] = 14,
+ [0][1][2][1][RTW89_WW][8] = 28,
+ [0][1][2][1][RTW89_WW][10] = 28,
+ [0][1][2][1][RTW89_WW][12] = 28,
+ [0][1][2][1][RTW89_WW][14] = 28,
+ [0][1][2][1][RTW89_WW][15] = 28,
+ [0][1][2][1][RTW89_WW][17] = 28,
+ [0][1][2][1][RTW89_WW][19] = 28,
+ [0][1][2][1][RTW89_WW][21] = 28,
+ [0][1][2][1][RTW89_WW][23] = 28,
+ [0][1][2][1][RTW89_WW][25] = 28,
+ [0][1][2][1][RTW89_WW][27] = 28,
+ [0][1][2][1][RTW89_WW][29] = 28,
+ [0][1][2][1][RTW89_WW][31] = 28,
+ [0][1][2][1][RTW89_WW][33] = 28,
+ [0][1][2][1][RTW89_WW][35] = 28,
+ [0][1][2][1][RTW89_WW][37] = 36,
+ [0][1][2][1][RTW89_WW][38] = 4,
+ [0][1][2][1][RTW89_WW][40] = 4,
+ [0][1][2][1][RTW89_WW][42] = 4,
+ [0][1][2][1][RTW89_WW][44] = 4,
+ [0][1][2][1][RTW89_WW][46] = 4,
+ [0][1][2][1][RTW89_WW][48] = 58,
+ [0][1][2][1][RTW89_WW][50] = 58,
+ [0][1][2][1][RTW89_WW][52] = 58,
+ [1][0][2][0][RTW89_WW][1] = 42,
+ [1][0][2][0][RTW89_WW][5] = 42,
+ [1][0][2][0][RTW89_WW][9] = 52,
+ [1][0][2][0][RTW89_WW][13] = 52,
+ [1][0][2][0][RTW89_WW][16] = 52,
+ [1][0][2][0][RTW89_WW][20] = 52,
+ [1][0][2][0][RTW89_WW][24] = 52,
+ [1][0][2][0][RTW89_WW][28] = 52,
+ [1][0][2][0][RTW89_WW][32] = 52,
+ [1][0][2][0][RTW89_WW][36] = 64,
+ [1][0][2][0][RTW89_WW][39] = 28,
+ [1][0][2][0][RTW89_WW][43] = 28,
+ [1][0][2][0][RTW89_WW][47] = 78,
+ [1][0][2][0][RTW89_WW][51] = 70,
+ [1][1][2][0][RTW89_WW][1] = 30,
+ [1][1][2][0][RTW89_WW][5] = 30,
+ [1][1][2][0][RTW89_WW][9] = 40,
+ [1][1][2][0][RTW89_WW][13] = 40,
+ [1][1][2][0][RTW89_WW][16] = 40,
+ [1][1][2][0][RTW89_WW][20] = 40,
+ [1][1][2][0][RTW89_WW][24] = 40,
+ [1][1][2][0][RTW89_WW][28] = 40,
+ [1][1][2][0][RTW89_WW][32] = 40,
+ [1][1][2][0][RTW89_WW][36] = 50,
+ [1][1][2][0][RTW89_WW][39] = 16,
+ [1][1][2][0][RTW89_WW][43] = 16,
+ [1][1][2][0][RTW89_WW][47] = 68,
+ [1][1][2][0][RTW89_WW][51] = 66,
+ [1][1][2][1][RTW89_WW][1] = 16,
+ [1][1][2][1][RTW89_WW][5] = 16,
+ [1][1][2][1][RTW89_WW][9] = 28,
+ [1][1][2][1][RTW89_WW][13] = 28,
+ [1][1][2][1][RTW89_WW][16] = 28,
+ [1][1][2][1][RTW89_WW][20] = 28,
+ [1][1][2][1][RTW89_WW][24] = 28,
+ [1][1][2][1][RTW89_WW][28] = 28,
+ [1][1][2][1][RTW89_WW][32] = 28,
+ [1][1][2][1][RTW89_WW][36] = 36,
+ [1][1][2][1][RTW89_WW][39] = 4,
+ [1][1][2][1][RTW89_WW][43] = 4,
+ [1][1][2][1][RTW89_WW][47] = 68,
+ [1][1][2][1][RTW89_WW][51] = 66,
+ [2][0][2][0][RTW89_WW][3] = 42,
+ [2][0][2][0][RTW89_WW][11] = 52,
+ [2][0][2][0][RTW89_WW][18] = 52,
+ [2][0][2][0][RTW89_WW][26] = 52,
+ [2][0][2][0][RTW89_WW][34] = 64,
+ [2][0][2][0][RTW89_WW][41] = 28,
+ [2][0][2][0][RTW89_WW][49] = 64,
+ [2][1][2][0][RTW89_WW][3] = 28,
+ [2][1][2][0][RTW89_WW][11] = 40,
+ [2][1][2][0][RTW89_WW][18] = 40,
+ [2][1][2][0][RTW89_WW][26] = 40,
+ [2][1][2][0][RTW89_WW][34] = 50,
+ [2][1][2][0][RTW89_WW][41] = 16,
+ [2][1][2][0][RTW89_WW][49] = 58,
+ [2][1][2][1][RTW89_WW][3] = 16,
+ [2][1][2][1][RTW89_WW][11] = 28,
+ [2][1][2][1][RTW89_WW][18] = 28,
+ [2][1][2][1][RTW89_WW][26] = 28,
+ [2][1][2][1][RTW89_WW][34] = 34,
+ [2][1][2][1][RTW89_WW][41] = 4,
+ [2][1][2][1][RTW89_WW][49] = 58,
+ [0][0][1][0][RTW89_FCC][0] = 78,
+ [0][0][1][0][RTW89_ETSI][0] = 58,
+ [0][0][1][0][RTW89_MKK][0] = 60,
+ [0][0][1][0][RTW89_IC][0] = 60,
+ [0][0][1][0][RTW89_KCC][0] = 76,
+ [0][0][1][0][RTW89_ACMA][0] = 58,
+ [0][0][1][0][RTW89_CHILE][0] = 42,
+ [0][0][1][0][RTW89_UKRAINE][0] = 52,
+ [0][0][1][0][RTW89_MEXICO][0] = 62,
+ [0][0][1][0][RTW89_CN][0] = 58,
+ [0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 58,
+ [0][0][1][0][RTW89_FCC][2] = 78,
+ [0][0][1][0][RTW89_ETSI][2] = 58,
+ [0][0][1][0][RTW89_MKK][2] = 60,
+ [0][0][1][0][RTW89_IC][2] = 60,
+ [0][0][1][0][RTW89_KCC][2] = 76,
+ [0][0][1][0][RTW89_ACMA][2] = 58,
+ [0][0][1][0][RTW89_CHILE][2] = 42,
+ [0][0][1][0][RTW89_UKRAINE][2] = 52,
+ [0][0][1][0][RTW89_MEXICO][2] = 62,
+ [0][0][1][0][RTW89_CN][2] = 58,
+ [0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 58,
+ [0][0][1][0][RTW89_FCC][4] = 78,
+ [0][0][1][0][RTW89_ETSI][4] = 58,
+ [0][0][1][0][RTW89_MKK][4] = 60,
+ [0][0][1][0][RTW89_IC][4] = 60,
+ [0][0][1][0][RTW89_KCC][4] = 76,
+ [0][0][1][0][RTW89_ACMA][4] = 58,
+ [0][0][1][0][RTW89_CHILE][4] = 42,
+ [0][0][1][0][RTW89_UKRAINE][4] = 52,
+ [0][0][1][0][RTW89_MEXICO][4] = 62,
+ [0][0][1][0][RTW89_CN][4] = 58,
+ [0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 58,
+ [0][0][1][0][RTW89_FCC][6] = 78,
+ [0][0][1][0][RTW89_ETSI][6] = 58,
+ [0][0][1][0][RTW89_MKK][6] = 60,
+ [0][0][1][0][RTW89_IC][6] = 60,
+ [0][0][1][0][RTW89_KCC][6] = 50,
+ [0][0][1][0][RTW89_ACMA][6] = 58,
+ [0][0][1][0][RTW89_CHILE][6] = 42,
+ [0][0][1][0][RTW89_UKRAINE][6] = 52,
+ [0][0][1][0][RTW89_MEXICO][6] = 62,
+ [0][0][1][0][RTW89_CN][6] = 58,
+ [0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 58,
+ [0][0][1][0][RTW89_FCC][8] = 78,
+ [0][0][1][0][RTW89_ETSI][8] = 58,
+ [0][0][1][0][RTW89_MKK][8] = 62,
+ [0][0][1][0][RTW89_IC][8] = 64,
+ [0][0][1][0][RTW89_KCC][8] = 70,
+ [0][0][1][0][RTW89_ACMA][8] = 58,
+ [0][0][1][0][RTW89_CHILE][8] = 66,
+ [0][0][1][0][RTW89_UKRAINE][8] = 52,
+ [0][0][1][0][RTW89_MEXICO][8] = 78,
+ [0][0][1][0][RTW89_CN][8] = 58,
+ [0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 58,
+ [0][0][1][0][RTW89_FCC][10] = 78,
+ [0][0][1][0][RTW89_ETSI][10] = 58,
+ [0][0][1][0][RTW89_MKK][10] = 62,
+ [0][0][1][0][RTW89_IC][10] = 64,
+ [0][0][1][0][RTW89_KCC][10] = 70,
+ [0][0][1][0][RTW89_ACMA][10] = 58,
+ [0][0][1][0][RTW89_CHILE][10] = 66,
+ [0][0][1][0][RTW89_UKRAINE][10] = 52,
+ [0][0][1][0][RTW89_MEXICO][10] = 78,
+ [0][0][1][0][RTW89_CN][10] = 58,
+ [0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 58,
+ [0][0][1][0][RTW89_FCC][12] = 78,
+ [0][0][1][0][RTW89_ETSI][12] = 58,
+ [0][0][1][0][RTW89_MKK][12] = 62,
+ [0][0][1][0][RTW89_IC][12] = 64,
+ [0][0][1][0][RTW89_KCC][12] = 74,
+ [0][0][1][0][RTW89_ACMA][12] = 58,
+ [0][0][1][0][RTW89_CHILE][12] = 66,
+ [0][0][1][0][RTW89_UKRAINE][12] = 52,
+ [0][0][1][0][RTW89_MEXICO][12] = 78,
+ [0][0][1][0][RTW89_CN][12] = 58,
+ [0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 58,
+ [0][0][1][0][RTW89_FCC][14] = 78,
+ [0][0][1][0][RTW89_ETSI][14] = 58,
+ [0][0][1][0][RTW89_MKK][14] = 60,
+ [0][0][1][0][RTW89_IC][14] = 64,
+ [0][0][1][0][RTW89_KCC][14] = 74,
+ [0][0][1][0][RTW89_ACMA][14] = 58,
+ [0][0][1][0][RTW89_CHILE][14] = 66,
+ [0][0][1][0][RTW89_UKRAINE][14] = 52,
+ [0][0][1][0][RTW89_MEXICO][14] = 78,
+ [0][0][1][0][RTW89_CN][14] = 58,
+ [0][0][1][0][RTW89_QATAR][14] = 58,
+ [0][0][1][0][RTW89_UK][14] = 58,
+ [0][0][1][0][RTW89_FCC][15] = 76,
+ [0][0][1][0][RTW89_ETSI][15] = 58,
+ [0][0][1][0][RTW89_MKK][15] = 76,
+ [0][0][1][0][RTW89_IC][15] = 76,
+ [0][0][1][0][RTW89_KCC][15] = 74,
+ [0][0][1][0][RTW89_ACMA][15] = 58,
+ [0][0][1][0][RTW89_CHILE][15] = 66,
+ [0][0][1][0][RTW89_UKRAINE][15] = 52,
+ [0][0][1][0][RTW89_MEXICO][15] = 76,
+ [0][0][1][0][RTW89_CN][15] = 127,
+ [0][0][1][0][RTW89_QATAR][15] = 58,
+ [0][0][1][0][RTW89_UK][15] = 58,
+ [0][0][1][0][RTW89_FCC][17] = 78,
+ [0][0][1][0][RTW89_ETSI][17] = 58,
+ [0][0][1][0][RTW89_MKK][17] = 76,
+ [0][0][1][0][RTW89_IC][17] = 78,
+ [0][0][1][0][RTW89_KCC][17] = 74,
+ [0][0][1][0][RTW89_ACMA][17] = 58,
+ [0][0][1][0][RTW89_CHILE][17] = 66,
+ [0][0][1][0][RTW89_UKRAINE][17] = 52,
+ [0][0][1][0][RTW89_MEXICO][17] = 78,
+ [0][0][1][0][RTW89_CN][17] = 127,
+ [0][0][1][0][RTW89_QATAR][17] = 58,
+ [0][0][1][0][RTW89_UK][17] = 58,
+ [0][0][1][0][RTW89_FCC][19] = 78,
+ [0][0][1][0][RTW89_ETSI][19] = 58,
+ [0][0][1][0][RTW89_MKK][19] = 76,
+ [0][0][1][0][RTW89_IC][19] = 78,
+ [0][0][1][0][RTW89_KCC][19] = 74,
+ [0][0][1][0][RTW89_ACMA][19] = 58,
+ [0][0][1][0][RTW89_CHILE][19] = 66,
+ [0][0][1][0][RTW89_UKRAINE][19] = 52,
+ [0][0][1][0][RTW89_MEXICO][19] = 78,
+ [0][0][1][0][RTW89_CN][19] = 127,
+ [0][0][1][0][RTW89_QATAR][19] = 58,
+ [0][0][1][0][RTW89_UK][19] = 58,
+ [0][0][1][0][RTW89_FCC][21] = 78,
+ [0][0][1][0][RTW89_ETSI][21] = 58,
+ [0][0][1][0][RTW89_MKK][21] = 76,
+ [0][0][1][0][RTW89_IC][21] = 78,
+ [0][0][1][0][RTW89_KCC][21] = 74,
+ [0][0][1][0][RTW89_ACMA][21] = 58,
+ [0][0][1][0][RTW89_CHILE][21] = 68,
+ [0][0][1][0][RTW89_UKRAINE][21] = 52,
+ [0][0][1][0][RTW89_MEXICO][21] = 78,
+ [0][0][1][0][RTW89_CN][21] = 127,
+ [0][0][1][0][RTW89_QATAR][21] = 58,
+ [0][0][1][0][RTW89_UK][21] = 58,
+ [0][0][1][0][RTW89_FCC][23] = 78,
+ [0][0][1][0][RTW89_ETSI][23] = 58,
+ [0][0][1][0][RTW89_MKK][23] = 76,
+ [0][0][1][0][RTW89_IC][23] = 78,
+ [0][0][1][0][RTW89_KCC][23] = 74,
+ [0][0][1][0][RTW89_ACMA][23] = 58,
+ [0][0][1][0][RTW89_CHILE][23] = 68,
+ [0][0][1][0][RTW89_UKRAINE][23] = 52,
+ [0][0][1][0][RTW89_MEXICO][23] = 78,
+ [0][0][1][0][RTW89_CN][23] = 127,
+ [0][0][1][0][RTW89_QATAR][23] = 58,
+ [0][0][1][0][RTW89_UK][23] = 58,
+ [0][0][1][0][RTW89_FCC][25] = 78,
+ [0][0][1][0][RTW89_ETSI][25] = 58,
+ [0][0][1][0][RTW89_MKK][25] = 76,
+ [0][0][1][0][RTW89_IC][25] = 127,
+ [0][0][1][0][RTW89_KCC][25] = 74,
+ [0][0][1][0][RTW89_ACMA][25] = 127,
+ [0][0][1][0][RTW89_CHILE][25] = 68,
+ [0][0][1][0][RTW89_UKRAINE][25] = 52,
+ [0][0][1][0][RTW89_MEXICO][25] = 78,
+ [0][0][1][0][RTW89_CN][25] = 127,
+ [0][0][1][0][RTW89_QATAR][25] = 58,
+ [0][0][1][0][RTW89_UK][25] = 58,
+ [0][0][1][0][RTW89_FCC][27] = 78,
+ [0][0][1][0][RTW89_ETSI][27] = 58,
+ [0][0][1][0][RTW89_MKK][27] = 76,
+ [0][0][1][0][RTW89_IC][27] = 127,
+ [0][0][1][0][RTW89_KCC][27] = 74,
+ [0][0][1][0][RTW89_ACMA][27] = 127,
+ [0][0][1][0][RTW89_CHILE][27] = 66,
+ [0][0][1][0][RTW89_UKRAINE][27] = 52,
+ [0][0][1][0][RTW89_MEXICO][27] = 78,
+ [0][0][1][0][RTW89_CN][27] = 127,
+ [0][0][1][0][RTW89_QATAR][27] = 58,
+ [0][0][1][0][RTW89_UK][27] = 58,
+ [0][0][1][0][RTW89_FCC][29] = 78,
+ [0][0][1][0][RTW89_ETSI][29] = 58,
+ [0][0][1][0][RTW89_MKK][29] = 76,
+ [0][0][1][0][RTW89_IC][29] = 127,
+ [0][0][1][0][RTW89_KCC][29] = 74,
+ [0][0][1][0][RTW89_ACMA][29] = 127,
+ [0][0][1][0][RTW89_CHILE][29] = 66,
+ [0][0][1][0][RTW89_UKRAINE][29] = 52,
+ [0][0][1][0][RTW89_MEXICO][29] = 78,
+ [0][0][1][0][RTW89_CN][29] = 127,
+ [0][0][1][0][RTW89_QATAR][29] = 58,
+ [0][0][1][0][RTW89_UK][29] = 58,
+ [0][0][1][0][RTW89_FCC][31] = 78,
+ [0][0][1][0][RTW89_ETSI][31] = 58,
+ [0][0][1][0][RTW89_MKK][31] = 76,
+ [0][0][1][0][RTW89_IC][31] = 78,
+ [0][0][1][0][RTW89_KCC][31] = 72,
+ [0][0][1][0][RTW89_ACMA][31] = 58,
+ [0][0][1][0][RTW89_CHILE][31] = 66,
+ [0][0][1][0][RTW89_UKRAINE][31] = 52,
+ [0][0][1][0][RTW89_MEXICO][31] = 78,
+ [0][0][1][0][RTW89_CN][31] = 127,
+ [0][0][1][0][RTW89_QATAR][31] = 58,
+ [0][0][1][0][RTW89_UK][31] = 58,
+ [0][0][1][0][RTW89_FCC][33] = 78,
+ [0][0][1][0][RTW89_ETSI][33] = 58,
+ [0][0][1][0][RTW89_MKK][33] = 76,
+ [0][0][1][0][RTW89_IC][33] = 78,
+ [0][0][1][0][RTW89_KCC][33] = 72,
+ [0][0][1][0][RTW89_ACMA][33] = 58,
+ [0][0][1][0][RTW89_CHILE][33] = 66,
+ [0][0][1][0][RTW89_UKRAINE][33] = 52,
+ [0][0][1][0][RTW89_MEXICO][33] = 78,
+ [0][0][1][0][RTW89_CN][33] = 127,
+ [0][0][1][0][RTW89_QATAR][33] = 58,
+ [0][0][1][0][RTW89_UK][33] = 58,
+ [0][0][1][0][RTW89_FCC][35] = 70,
+ [0][0][1][0][RTW89_ETSI][35] = 58,
+ [0][0][1][0][RTW89_MKK][35] = 76,
+ [0][0][1][0][RTW89_IC][35] = 70,
+ [0][0][1][0][RTW89_KCC][35] = 72,
+ [0][0][1][0][RTW89_ACMA][35] = 58,
+ [0][0][1][0][RTW89_CHILE][35] = 66,
+ [0][0][1][0][RTW89_UKRAINE][35] = 52,
+ [0][0][1][0][RTW89_MEXICO][35] = 70,
+ [0][0][1][0][RTW89_CN][35] = 127,
+ [0][0][1][0][RTW89_QATAR][35] = 58,
+ [0][0][1][0][RTW89_UK][35] = 58,
+ [0][0][1][0][RTW89_FCC][37] = 78,
+ [0][0][1][0][RTW89_ETSI][37] = 127,
+ [0][0][1][0][RTW89_MKK][37] = 76,
+ [0][0][1][0][RTW89_IC][37] = 78,
+ [0][0][1][0][RTW89_KCC][37] = 72,
+ [0][0][1][0][RTW89_ACMA][37] = 76,
+ [0][0][1][0][RTW89_CHILE][37] = 68,
+ [0][0][1][0][RTW89_UKRAINE][37] = 127,
+ [0][0][1][0][RTW89_MEXICO][37] = 78,
+ [0][0][1][0][RTW89_CN][37] = 127,
+ [0][0][1][0][RTW89_QATAR][37] = 127,
+ [0][0][1][0][RTW89_UK][37] = 76,
+ [0][0][1][0][RTW89_FCC][38] = 78,
+ [0][0][1][0][RTW89_ETSI][38] = 28,
+ [0][0][1][0][RTW89_MKK][38] = 127,
+ [0][0][1][0][RTW89_IC][38] = 78,
+ [0][0][1][0][RTW89_KCC][38] = 74,
+ [0][0][1][0][RTW89_ACMA][38] = 76,
+ [0][0][1][0][RTW89_CHILE][38] = 68,
+ [0][0][1][0][RTW89_UKRAINE][38] = 28,
+ [0][0][1][0][RTW89_MEXICO][38] = 78,
+ [0][0][1][0][RTW89_CN][38] = 76,
+ [0][0][1][0][RTW89_QATAR][38] = 28,
+ [0][0][1][0][RTW89_UK][38] = 58,
+ [0][0][1][0][RTW89_FCC][40] = 78,
+ [0][0][1][0][RTW89_ETSI][40] = 28,
+ [0][0][1][0][RTW89_MKK][40] = 127,
+ [0][0][1][0][RTW89_IC][40] = 78,
+ [0][0][1][0][RTW89_KCC][40] = 74,
+ [0][0][1][0][RTW89_ACMA][40] = 76,
+ [0][0][1][0][RTW89_CHILE][40] = 68,
+ [0][0][1][0][RTW89_UKRAINE][40] = 28,
+ [0][0][1][0][RTW89_MEXICO][40] = 78,
+ [0][0][1][0][RTW89_CN][40] = 76,
+ [0][0][1][0][RTW89_QATAR][40] = 28,
+ [0][0][1][0][RTW89_UK][40] = 58,
+ [0][0][1][0][RTW89_FCC][42] = 78,
+ [0][0][1][0][RTW89_ETSI][42] = 28,
+ [0][0][1][0][RTW89_MKK][42] = 127,
+ [0][0][1][0][RTW89_IC][42] = 78,
+ [0][0][1][0][RTW89_KCC][42] = 74,
+ [0][0][1][0][RTW89_ACMA][42] = 76,
+ [0][0][1][0][RTW89_CHILE][42] = 66,
+ [0][0][1][0][RTW89_UKRAINE][42] = 28,
+ [0][0][1][0][RTW89_MEXICO][42] = 78,
+ [0][0][1][0][RTW89_CN][42] = 76,
+ [0][0][1][0][RTW89_QATAR][42] = 28,
+ [0][0][1][0][RTW89_UK][42] = 58,
+ [0][0][1][0][RTW89_FCC][44] = 78,
+ [0][0][1][0][RTW89_ETSI][44] = 28,
+ [0][0][1][0][RTW89_MKK][44] = 127,
+ [0][0][1][0][RTW89_IC][44] = 78,
+ [0][0][1][0][RTW89_KCC][44] = 74,
+ [0][0][1][0][RTW89_ACMA][44] = 76,
+ [0][0][1][0][RTW89_CHILE][44] = 68,
+ [0][0][1][0][RTW89_UKRAINE][44] = 28,
+ [0][0][1][0][RTW89_MEXICO][44] = 78,
+ [0][0][1][0][RTW89_CN][44] = 76,
+ [0][0][1][0][RTW89_QATAR][44] = 28,
+ [0][0][1][0][RTW89_UK][44] = 58,
+ [0][0][1][0][RTW89_FCC][46] = 78,
+ [0][0][1][0][RTW89_ETSI][46] = 28,
+ [0][0][1][0][RTW89_MKK][46] = 127,
+ [0][0][1][0][RTW89_IC][46] = 78,
+ [0][0][1][0][RTW89_KCC][46] = 74,
+ [0][0][1][0][RTW89_ACMA][46] = 76,
+ [0][0][1][0][RTW89_CHILE][46] = 68,
+ [0][0][1][0][RTW89_UKRAINE][46] = 28,
+ [0][0][1][0][RTW89_MEXICO][46] = 78,
+ [0][0][1][0][RTW89_CN][46] = 76,
+ [0][0][1][0][RTW89_QATAR][46] = 28,
+ [0][0][1][0][RTW89_UK][46] = 58,
+ [0][0][1][0][RTW89_FCC][48] = 78,
+ [0][0][1][0][RTW89_ETSI][48] = 127,
+ [0][0][1][0][RTW89_MKK][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_KCC][48] = 127,
+ [0][0][1][0][RTW89_ACMA][48] = 127,
+ [0][0][1][0][RTW89_CHILE][48] = 127,
+ [0][0][1][0][RTW89_UKRAINE][48] = 127,
+ [0][0][1][0][RTW89_MEXICO][48] = 127,
+ [0][0][1][0][RTW89_CN][48] = 127,
+ [0][0][1][0][RTW89_QATAR][48] = 127,
+ [0][0][1][0][RTW89_UK][48] = 127,
+ [0][0][1][0][RTW89_FCC][50] = 78,
+ [0][0][1][0][RTW89_ETSI][50] = 127,
+ [0][0][1][0][RTW89_MKK][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_KCC][50] = 127,
+ [0][0][1][0][RTW89_ACMA][50] = 127,
+ [0][0][1][0][RTW89_CHILE][50] = 127,
+ [0][0][1][0][RTW89_UKRAINE][50] = 127,
+ [0][0][1][0][RTW89_MEXICO][50] = 127,
+ [0][0][1][0][RTW89_CN][50] = 127,
+ [0][0][1][0][RTW89_QATAR][50] = 127,
+ [0][0][1][0][RTW89_UK][50] = 127,
+ [0][0][1][0][RTW89_FCC][52] = 78,
+ [0][0][1][0][RTW89_ETSI][52] = 127,
+ [0][0][1][0][RTW89_MKK][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_KCC][52] = 127,
+ [0][0][1][0][RTW89_ACMA][52] = 127,
+ [0][0][1][0][RTW89_CHILE][52] = 127,
+ [0][0][1][0][RTW89_UKRAINE][52] = 127,
+ [0][0][1][0][RTW89_MEXICO][52] = 127,
+ [0][0][1][0][RTW89_CN][52] = 127,
+ [0][0][1][0][RTW89_QATAR][52] = 127,
+ [0][0][1][0][RTW89_UK][52] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 68,
+ [0][1][1][0][RTW89_ETSI][0] = 46,
+ [0][1][1][0][RTW89_MKK][0] = 48,
+ [0][1][1][0][RTW89_IC][0] = 40,
+ [0][1][1][0][RTW89_KCC][0] = 64,
+ [0][1][1][0][RTW89_ACMA][0] = 46,
+ [0][1][1][0][RTW89_CHILE][0] = 30,
+ [0][1][1][0][RTW89_UKRAINE][0] = 40,
+ [0][1][1][0][RTW89_MEXICO][0] = 50,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 46,
+ [0][1][1][0][RTW89_FCC][2] = 68,
+ [0][1][1][0][RTW89_ETSI][2] = 46,
+ [0][1][1][0][RTW89_MKK][2] = 48,
+ [0][1][1][0][RTW89_IC][2] = 40,
+ [0][1][1][0][RTW89_KCC][2] = 64,
+ [0][1][1][0][RTW89_ACMA][2] = 46,
+ [0][1][1][0][RTW89_CHILE][2] = 32,
+ [0][1][1][0][RTW89_UKRAINE][2] = 40,
+ [0][1][1][0][RTW89_MEXICO][2] = 50,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 46,
+ [0][1][1][0][RTW89_FCC][4] = 68,
+ [0][1][1][0][RTW89_ETSI][4] = 46,
+ [0][1][1][0][RTW89_MKK][4] = 48,
+ [0][1][1][0][RTW89_IC][4] = 40,
+ [0][1][1][0][RTW89_KCC][4] = 64,
+ [0][1][1][0][RTW89_ACMA][4] = 46,
+ [0][1][1][0][RTW89_CHILE][4] = 30,
+ [0][1][1][0][RTW89_UKRAINE][4] = 40,
+ [0][1][1][0][RTW89_MEXICO][4] = 50,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 46,
+ [0][1][1][0][RTW89_FCC][6] = 68,
+ [0][1][1][0][RTW89_ETSI][6] = 46,
+ [0][1][1][0][RTW89_MKK][6] = 48,
+ [0][1][1][0][RTW89_IC][6] = 40,
+ [0][1][1][0][RTW89_KCC][6] = 38,
+ [0][1][1][0][RTW89_ACMA][6] = 46,
+ [0][1][1][0][RTW89_CHILE][6] = 30,
+ [0][1][1][0][RTW89_UKRAINE][6] = 40,
+ [0][1][1][0][RTW89_MEXICO][6] = 50,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 46,
+ [0][1][1][0][RTW89_FCC][8] = 68,
+ [0][1][1][0][RTW89_ETSI][8] = 46,
+ [0][1][1][0][RTW89_MKK][8] = 48,
+ [0][1][1][0][RTW89_IC][8] = 52,
+ [0][1][1][0][RTW89_KCC][8] = 64,
+ [0][1][1][0][RTW89_ACMA][8] = 46,
+ [0][1][1][0][RTW89_CHILE][8] = 52,
+ [0][1][1][0][RTW89_UKRAINE][8] = 40,
+ [0][1][1][0][RTW89_MEXICO][8] = 68,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 46,
+ [0][1][1][0][RTW89_FCC][10] = 68,
+ [0][1][1][0][RTW89_ETSI][10] = 46,
+ [0][1][1][0][RTW89_MKK][10] = 48,
+ [0][1][1][0][RTW89_IC][10] = 52,
+ [0][1][1][0][RTW89_KCC][10] = 64,
+ [0][1][1][0][RTW89_ACMA][10] = 46,
+ [0][1][1][0][RTW89_CHILE][10] = 52,
+ [0][1][1][0][RTW89_UKRAINE][10] = 40,
+ [0][1][1][0][RTW89_MEXICO][10] = 68,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 46,
+ [0][1][1][0][RTW89_FCC][12] = 68,
+ [0][1][1][0][RTW89_ETSI][12] = 46,
+ [0][1][1][0][RTW89_MKK][12] = 48,
+ [0][1][1][0][RTW89_IC][12] = 52,
+ [0][1][1][0][RTW89_KCC][12] = 64,
+ [0][1][1][0][RTW89_ACMA][12] = 46,
+ [0][1][1][0][RTW89_CHILE][12] = 52,
+ [0][1][1][0][RTW89_UKRAINE][12] = 40,
+ [0][1][1][0][RTW89_MEXICO][12] = 68,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 46,
+ [0][1][1][0][RTW89_FCC][14] = 68,
+ [0][1][1][0][RTW89_ETSI][14] = 46,
+ [0][1][1][0][RTW89_MKK][14] = 48,
+ [0][1][1][0][RTW89_IC][14] = 52,
+ [0][1][1][0][RTW89_KCC][14] = 64,
+ [0][1][1][0][RTW89_ACMA][14] = 46,
+ [0][1][1][0][RTW89_CHILE][14] = 52,
+ [0][1][1][0][RTW89_UKRAINE][14] = 40,
+ [0][1][1][0][RTW89_MEXICO][14] = 68,
+ [0][1][1][0][RTW89_CN][14] = 46,
+ [0][1][1][0][RTW89_QATAR][14] = 46,
+ [0][1][1][0][RTW89_UK][14] = 46,
+ [0][1][1][0][RTW89_FCC][15] = 66,
+ [0][1][1][0][RTW89_ETSI][15] = 46,
+ [0][1][1][0][RTW89_MKK][15] = 68,
+ [0][1][1][0][RTW89_IC][15] = 66,
+ [0][1][1][0][RTW89_KCC][15] = 62,
+ [0][1][1][0][RTW89_ACMA][15] = 46,
+ [0][1][1][0][RTW89_CHILE][15] = 48,
+ [0][1][1][0][RTW89_UKRAINE][15] = 40,
+ [0][1][1][0][RTW89_MEXICO][15] = 66,
+ [0][1][1][0][RTW89_CN][15] = 127,
+ [0][1][1][0][RTW89_QATAR][15] = 46,
+ [0][1][1][0][RTW89_UK][15] = 46,
+ [0][1][1][0][RTW89_FCC][17] = 68,
+ [0][1][1][0][RTW89_ETSI][17] = 46,
+ [0][1][1][0][RTW89_MKK][17] = 70,
+ [0][1][1][0][RTW89_IC][17] = 68,
+ [0][1][1][0][RTW89_KCC][17] = 62,
+ [0][1][1][0][RTW89_ACMA][17] = 46,
+ [0][1][1][0][RTW89_CHILE][17] = 48,
+ [0][1][1][0][RTW89_UKRAINE][17] = 40,
+ [0][1][1][0][RTW89_MEXICO][17] = 68,
+ [0][1][1][0][RTW89_CN][17] = 127,
+ [0][1][1][0][RTW89_QATAR][17] = 46,
+ [0][1][1][0][RTW89_UK][17] = 46,
+ [0][1][1][0][RTW89_FCC][19] = 68,
+ [0][1][1][0][RTW89_ETSI][19] = 46,
+ [0][1][1][0][RTW89_MKK][19] = 70,
+ [0][1][1][0][RTW89_IC][19] = 68,
+ [0][1][1][0][RTW89_KCC][19] = 62,
+ [0][1][1][0][RTW89_ACMA][19] = 46,
+ [0][1][1][0][RTW89_CHILE][19] = 48,
+ [0][1][1][0][RTW89_UKRAINE][19] = 40,
+ [0][1][1][0][RTW89_MEXICO][19] = 68,
+ [0][1][1][0][RTW89_CN][19] = 127,
+ [0][1][1][0][RTW89_QATAR][19] = 46,
+ [0][1][1][0][RTW89_UK][19] = 46,
+ [0][1][1][0][RTW89_FCC][21] = 68,
+ [0][1][1][0][RTW89_ETSI][21] = 46,
+ [0][1][1][0][RTW89_MKK][21] = 70,
+ [0][1][1][0][RTW89_IC][21] = 68,
+ [0][1][1][0][RTW89_KCC][21] = 62,
+ [0][1][1][0][RTW89_ACMA][21] = 46,
+ [0][1][1][0][RTW89_CHILE][21] = 48,
+ [0][1][1][0][RTW89_UKRAINE][21] = 40,
+ [0][1][1][0][RTW89_MEXICO][21] = 68,
+ [0][1][1][0][RTW89_CN][21] = 127,
+ [0][1][1][0][RTW89_QATAR][21] = 46,
+ [0][1][1][0][RTW89_UK][21] = 46,
+ [0][1][1][0][RTW89_FCC][23] = 68,
+ [0][1][1][0][RTW89_ETSI][23] = 46,
+ [0][1][1][0][RTW89_MKK][23] = 70,
+ [0][1][1][0][RTW89_IC][23] = 68,
+ [0][1][1][0][RTW89_KCC][23] = 62,
+ [0][1][1][0][RTW89_ACMA][23] = 46,
+ [0][1][1][0][RTW89_CHILE][23] = 48,
+ [0][1][1][0][RTW89_UKRAINE][23] = 40,
+ [0][1][1][0][RTW89_MEXICO][23] = 68,
+ [0][1][1][0][RTW89_CN][23] = 127,
+ [0][1][1][0][RTW89_QATAR][23] = 46,
+ [0][1][1][0][RTW89_UK][23] = 46,
+ [0][1][1][0][RTW89_FCC][25] = 68,
+ [0][1][1][0][RTW89_ETSI][25] = 46,
+ [0][1][1][0][RTW89_MKK][25] = 68,
+ [0][1][1][0][RTW89_IC][25] = 127,
+ [0][1][1][0][RTW89_KCC][25] = 62,
+ [0][1][1][0][RTW89_ACMA][25] = 127,
+ [0][1][1][0][RTW89_CHILE][25] = 48,
+ [0][1][1][0][RTW89_UKRAINE][25] = 40,
+ [0][1][1][0][RTW89_MEXICO][25] = 68,
+ [0][1][1][0][RTW89_CN][25] = 127,
+ [0][1][1][0][RTW89_QATAR][25] = 46,
+ [0][1][1][0][RTW89_UK][25] = 46,
+ [0][1][1][0][RTW89_FCC][27] = 68,
+ [0][1][1][0][RTW89_ETSI][27] = 46,
+ [0][1][1][0][RTW89_MKK][27] = 70,
+ [0][1][1][0][RTW89_IC][27] = 127,
+ [0][1][1][0][RTW89_KCC][27] = 62,
+ [0][1][1][0][RTW89_ACMA][27] = 127,
+ [0][1][1][0][RTW89_CHILE][27] = 50,
+ [0][1][1][0][RTW89_UKRAINE][27] = 40,
+ [0][1][1][0][RTW89_MEXICO][27] = 68,
+ [0][1][1][0][RTW89_CN][27] = 127,
+ [0][1][1][0][RTW89_QATAR][27] = 46,
+ [0][1][1][0][RTW89_UK][27] = 46,
+ [0][1][1][0][RTW89_FCC][29] = 68,
+ [0][1][1][0][RTW89_ETSI][29] = 46,
+ [0][1][1][0][RTW89_MKK][29] = 70,
+ [0][1][1][0][RTW89_IC][29] = 127,
+ [0][1][1][0][RTW89_KCC][29] = 62,
+ [0][1][1][0][RTW89_ACMA][29] = 127,
+ [0][1][1][0][RTW89_CHILE][29] = 50,
+ [0][1][1][0][RTW89_UKRAINE][29] = 40,
+ [0][1][1][0][RTW89_MEXICO][29] = 68,
+ [0][1][1][0][RTW89_CN][29] = 127,
+ [0][1][1][0][RTW89_QATAR][29] = 46,
+ [0][1][1][0][RTW89_UK][29] = 46,
+ [0][1][1][0][RTW89_FCC][31] = 68,
+ [0][1][1][0][RTW89_ETSI][31] = 46,
+ [0][1][1][0][RTW89_MKK][31] = 70,
+ [0][1][1][0][RTW89_IC][31] = 68,
+ [0][1][1][0][RTW89_KCC][31] = 62,
+ [0][1][1][0][RTW89_ACMA][31] = 46,
+ [0][1][1][0][RTW89_CHILE][31] = 50,
+ [0][1][1][0][RTW89_UKRAINE][31] = 40,
+ [0][1][1][0][RTW89_MEXICO][31] = 68,
+ [0][1][1][0][RTW89_CN][31] = 127,
+ [0][1][1][0][RTW89_QATAR][31] = 46,
+ [0][1][1][0][RTW89_UK][31] = 46,
+ [0][1][1][0][RTW89_FCC][33] = 68,
+ [0][1][1][0][RTW89_ETSI][33] = 46,
+ [0][1][1][0][RTW89_MKK][33] = 70,
+ [0][1][1][0][RTW89_IC][33] = 68,
+ [0][1][1][0][RTW89_KCC][33] = 62,
+ [0][1][1][0][RTW89_ACMA][33] = 46,
+ [0][1][1][0][RTW89_CHILE][33] = 50,
+ [0][1][1][0][RTW89_UKRAINE][33] = 40,
+ [0][1][1][0][RTW89_MEXICO][33] = 68,
+ [0][1][1][0][RTW89_CN][33] = 127,
+ [0][1][1][0][RTW89_QATAR][33] = 46,
+ [0][1][1][0][RTW89_UK][33] = 46,
+ [0][1][1][0][RTW89_FCC][35] = 66,
+ [0][1][1][0][RTW89_ETSI][35] = 46,
+ [0][1][1][0][RTW89_MKK][35] = 70,
+ [0][1][1][0][RTW89_IC][35] = 66,
+ [0][1][1][0][RTW89_KCC][35] = 62,
+ [0][1][1][0][RTW89_ACMA][35] = 46,
+ [0][1][1][0][RTW89_CHILE][35] = 50,
+ [0][1][1][0][RTW89_UKRAINE][35] = 40,
+ [0][1][1][0][RTW89_MEXICO][35] = 66,
+ [0][1][1][0][RTW89_CN][35] = 127,
+ [0][1][1][0][RTW89_QATAR][35] = 46,
+ [0][1][1][0][RTW89_UK][35] = 46,
+ [0][1][1][0][RTW89_FCC][37] = 68,
+ [0][1][1][0][RTW89_ETSI][37] = 127,
+ [0][1][1][0][RTW89_MKK][37] = 70,
+ [0][1][1][0][RTW89_IC][37] = 68,
+ [0][1][1][0][RTW89_KCC][37] = 62,
+ [0][1][1][0][RTW89_ACMA][37] = 70,
+ [0][1][1][0][RTW89_CHILE][37] = 50,
+ [0][1][1][0][RTW89_UKRAINE][37] = 127,
+ [0][1][1][0][RTW89_MEXICO][37] = 68,
+ [0][1][1][0][RTW89_CN][37] = 127,
+ [0][1][1][0][RTW89_QATAR][37] = 127,
+ [0][1][1][0][RTW89_UK][37] = 76,
+ [0][1][1][0][RTW89_FCC][38] = 78,
+ [0][1][1][0][RTW89_ETSI][38] = 16,
+ [0][1][1][0][RTW89_MKK][38] = 127,
+ [0][1][1][0][RTW89_IC][38] = 78,
+ [0][1][1][0][RTW89_KCC][38] = 60,
+ [0][1][1][0][RTW89_ACMA][38] = 72,
+ [0][1][1][0][RTW89_CHILE][38] = 48,
+ [0][1][1][0][RTW89_UKRAINE][38] = 16,
+ [0][1][1][0][RTW89_MEXICO][38] = 78,
+ [0][1][1][0][RTW89_CN][38] = 76,
+ [0][1][1][0][RTW89_QATAR][38] = 16,
+ [0][1][1][0][RTW89_UK][38] = 46,
+ [0][1][1][0][RTW89_FCC][40] = 78,
+ [0][1][1][0][RTW89_ETSI][40] = 16,
+ [0][1][1][0][RTW89_MKK][40] = 127,
+ [0][1][1][0][RTW89_IC][40] = 78,
+ [0][1][1][0][RTW89_KCC][40] = 60,
+ [0][1][1][0][RTW89_ACMA][40] = 72,
+ [0][1][1][0][RTW89_CHILE][40] = 48,
+ [0][1][1][0][RTW89_UKRAINE][40] = 16,
+ [0][1][1][0][RTW89_MEXICO][40] = 78,
+ [0][1][1][0][RTW89_CN][40] = 76,
+ [0][1][1][0][RTW89_QATAR][40] = 16,
+ [0][1][1][0][RTW89_UK][40] = 46,
+ [0][1][1][0][RTW89_FCC][42] = 78,
+ [0][1][1][0][RTW89_ETSI][42] = 16,
+ [0][1][1][0][RTW89_MKK][42] = 127,
+ [0][1][1][0][RTW89_IC][42] = 78,
+ [0][1][1][0][RTW89_KCC][42] = 60,
+ [0][1][1][0][RTW89_ACMA][42] = 76,
+ [0][1][1][0][RTW89_CHILE][42] = 48,
+ [0][1][1][0][RTW89_UKRAINE][42] = 16,
+ [0][1][1][0][RTW89_MEXICO][42] = 78,
+ [0][1][1][0][RTW89_CN][42] = 76,
+ [0][1][1][0][RTW89_QATAR][42] = 16,
+ [0][1][1][0][RTW89_UK][42] = 46,
+ [0][1][1][0][RTW89_FCC][44] = 78,
+ [0][1][1][0][RTW89_ETSI][44] = 16,
+ [0][1][1][0][RTW89_MKK][44] = 127,
+ [0][1][1][0][RTW89_IC][44] = 78,
+ [0][1][1][0][RTW89_KCC][44] = 60,
+ [0][1][1][0][RTW89_ACMA][44] = 76,
+ [0][1][1][0][RTW89_CHILE][44] = 48,
+ [0][1][1][0][RTW89_UKRAINE][44] = 16,
+ [0][1][1][0][RTW89_MEXICO][44] = 78,
+ [0][1][1][0][RTW89_CN][44] = 76,
+ [0][1][1][0][RTW89_QATAR][44] = 16,
+ [0][1][1][0][RTW89_UK][44] = 46,
+ [0][1][1][0][RTW89_FCC][46] = 78,
+ [0][1][1][0][RTW89_ETSI][46] = 16,
+ [0][1][1][0][RTW89_MKK][46] = 127,
+ [0][1][1][0][RTW89_IC][46] = 78,
+ [0][1][1][0][RTW89_KCC][46] = 60,
+ [0][1][1][0][RTW89_ACMA][46] = 76,
+ [0][1][1][0][RTW89_CHILE][46] = 48,
+ [0][1][1][0][RTW89_UKRAINE][46] = 16,
+ [0][1][1][0][RTW89_MEXICO][46] = 78,
+ [0][1][1][0][RTW89_CN][46] = 76,
+ [0][1][1][0][RTW89_QATAR][46] = 16,
+ [0][1][1][0][RTW89_UK][46] = 46,
+ [0][1][1][0][RTW89_FCC][48] = 56,
+ [0][1][1][0][RTW89_ETSI][48] = 127,
+ [0][1][1][0][RTW89_MKK][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_KCC][48] = 127,
+ [0][1][1][0][RTW89_ACMA][48] = 127,
+ [0][1][1][0][RTW89_CHILE][48] = 127,
+ [0][1][1][0][RTW89_UKRAINE][48] = 127,
+ [0][1][1][0][RTW89_MEXICO][48] = 127,
+ [0][1][1][0][RTW89_CN][48] = 127,
+ [0][1][1][0][RTW89_QATAR][48] = 127,
+ [0][1][1][0][RTW89_UK][48] = 127,
+ [0][1][1][0][RTW89_FCC][50] = 56,
+ [0][1][1][0][RTW89_ETSI][50] = 127,
+ [0][1][1][0][RTW89_MKK][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_KCC][50] = 127,
+ [0][1][1][0][RTW89_ACMA][50] = 127,
+ [0][1][1][0][RTW89_CHILE][50] = 127,
+ [0][1][1][0][RTW89_UKRAINE][50] = 127,
+ [0][1][1][0][RTW89_MEXICO][50] = 127,
+ [0][1][1][0][RTW89_CN][50] = 127,
+ [0][1][1][0][RTW89_QATAR][50] = 127,
+ [0][1][1][0][RTW89_UK][50] = 127,
+ [0][1][1][0][RTW89_FCC][52] = 56,
+ [0][1][1][0][RTW89_ETSI][52] = 127,
+ [0][1][1][0][RTW89_MKK][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_KCC][52] = 127,
+ [0][1][1][0][RTW89_ACMA][52] = 127,
+ [0][1][1][0][RTW89_CHILE][52] = 127,
+ [0][1][1][0][RTW89_UKRAINE][52] = 127,
+ [0][1][1][0][RTW89_MEXICO][52] = 127,
+ [0][1][1][0][RTW89_CN][52] = 127,
+ [0][1][1][0][RTW89_QATAR][52] = 127,
+ [0][1][1][0][RTW89_UK][52] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 78,
+ [0][0][2][0][RTW89_ETSI][0] = 60,
+ [0][0][2][0][RTW89_MKK][0] = 62,
+ [0][0][2][0][RTW89_IC][0] = 64,
+ [0][0][2][0][RTW89_KCC][0] = 74,
+ [0][0][2][0][RTW89_ACMA][0] = 60,
+ [0][0][2][0][RTW89_CHILE][0] = 42,
+ [0][0][2][0][RTW89_UKRAINE][0] = 52,
+ [0][0][2][0][RTW89_MEXICO][0] = 62,
+ [0][0][2][0][RTW89_CN][0] = 60,
+ [0][0][2][0][RTW89_QATAR][0] = 60,
+ [0][0][2][0][RTW89_UK][0] = 60,
+ [0][0][2][0][RTW89_FCC][2] = 78,
+ [0][0][2][0][RTW89_ETSI][2] = 60,
+ [0][0][2][0][RTW89_MKK][2] = 62,
+ [0][0][2][0][RTW89_IC][2] = 64,
+ [0][0][2][0][RTW89_KCC][2] = 74,
+ [0][0][2][0][RTW89_ACMA][2] = 60,
+ [0][0][2][0][RTW89_CHILE][2] = 42,
+ [0][0][2][0][RTW89_UKRAINE][2] = 52,
+ [0][0][2][0][RTW89_MEXICO][2] = 62,
+ [0][0][2][0][RTW89_CN][2] = 60,
+ [0][0][2][0][RTW89_QATAR][2] = 60,
+ [0][0][2][0][RTW89_UK][2] = 60,
+ [0][0][2][0][RTW89_FCC][4] = 78,
+ [0][0][2][0][RTW89_ETSI][4] = 60,
+ [0][0][2][0][RTW89_MKK][4] = 62,
+ [0][0][2][0][RTW89_IC][4] = 64,
+ [0][0][2][0][RTW89_KCC][4] = 74,
+ [0][0][2][0][RTW89_ACMA][4] = 60,
+ [0][0][2][0][RTW89_CHILE][4] = 42,
+ [0][0][2][0][RTW89_UKRAINE][4] = 52,
+ [0][0][2][0][RTW89_MEXICO][4] = 62,
+ [0][0][2][0][RTW89_CN][4] = 60,
+ [0][0][2][0][RTW89_QATAR][4] = 60,
+ [0][0][2][0][RTW89_UK][4] = 60,
+ [0][0][2][0][RTW89_FCC][6] = 78,
+ [0][0][2][0][RTW89_ETSI][6] = 60,
+ [0][0][2][0][RTW89_MKK][6] = 62,
+ [0][0][2][0][RTW89_IC][6] = 64,
+ [0][0][2][0][RTW89_KCC][6] = 50,
+ [0][0][2][0][RTW89_ACMA][6] = 60,
+ [0][0][2][0][RTW89_CHILE][6] = 42,
+ [0][0][2][0][RTW89_UKRAINE][6] = 52,
+ [0][0][2][0][RTW89_MEXICO][6] = 62,
+ [0][0][2][0][RTW89_CN][6] = 60,
+ [0][0][2][0][RTW89_QATAR][6] = 60,
+ [0][0][2][0][RTW89_UK][6] = 60,
+ [0][0][2][0][RTW89_FCC][8] = 78,
+ [0][0][2][0][RTW89_ETSI][8] = 60,
+ [0][0][2][0][RTW89_MKK][8] = 62,
+ [0][0][2][0][RTW89_IC][8] = 64,
+ [0][0][2][0][RTW89_KCC][8] = 74,
+ [0][0][2][0][RTW89_ACMA][8] = 60,
+ [0][0][2][0][RTW89_CHILE][8] = 66,
+ [0][0][2][0][RTW89_UKRAINE][8] = 52,
+ [0][0][2][0][RTW89_MEXICO][8] = 78,
+ [0][0][2][0][RTW89_CN][8] = 60,
+ [0][0][2][0][RTW89_QATAR][8] = 60,
+ [0][0][2][0][RTW89_UK][8] = 60,
+ [0][0][2][0][RTW89_FCC][10] = 78,
+ [0][0][2][0][RTW89_ETSI][10] = 60,
+ [0][0][2][0][RTW89_MKK][10] = 62,
+ [0][0][2][0][RTW89_IC][10] = 64,
+ [0][0][2][0][RTW89_KCC][10] = 74,
+ [0][0][2][0][RTW89_ACMA][10] = 60,
+ [0][0][2][0][RTW89_CHILE][10] = 66,
+ [0][0][2][0][RTW89_UKRAINE][10] = 52,
+ [0][0][2][0][RTW89_MEXICO][10] = 78,
+ [0][0][2][0][RTW89_CN][10] = 60,
+ [0][0][2][0][RTW89_QATAR][10] = 60,
+ [0][0][2][0][RTW89_UK][10] = 60,
+ [0][0][2][0][RTW89_FCC][12] = 78,
+ [0][0][2][0][RTW89_ETSI][12] = 60,
+ [0][0][2][0][RTW89_MKK][12] = 62,
+ [0][0][2][0][RTW89_IC][12] = 64,
+ [0][0][2][0][RTW89_KCC][12] = 74,
+ [0][0][2][0][RTW89_ACMA][12] = 60,
+ [0][0][2][0][RTW89_CHILE][12] = 66,
+ [0][0][2][0][RTW89_UKRAINE][12] = 52,
+ [0][0][2][0][RTW89_MEXICO][12] = 78,
+ [0][0][2][0][RTW89_CN][12] = 60,
+ [0][0][2][0][RTW89_QATAR][12] = 60,
+ [0][0][2][0][RTW89_UK][12] = 60,
+ [0][0][2][0][RTW89_FCC][14] = 78,
+ [0][0][2][0][RTW89_ETSI][14] = 60,
+ [0][0][2][0][RTW89_MKK][14] = 62,
+ [0][0][2][0][RTW89_IC][14] = 64,
+ [0][0][2][0][RTW89_KCC][14] = 74,
+ [0][0][2][0][RTW89_ACMA][14] = 60,
+ [0][0][2][0][RTW89_CHILE][14] = 66,
+ [0][0][2][0][RTW89_UKRAINE][14] = 52,
+ [0][0][2][0][RTW89_MEXICO][14] = 78,
+ [0][0][2][0][RTW89_CN][14] = 60,
+ [0][0][2][0][RTW89_QATAR][14] = 60,
+ [0][0][2][0][RTW89_UK][14] = 60,
+ [0][0][2][0][RTW89_FCC][15] = 74,
+ [0][0][2][0][RTW89_ETSI][15] = 60,
+ [0][0][2][0][RTW89_MKK][15] = 76,
+ [0][0][2][0][RTW89_IC][15] = 74,
+ [0][0][2][0][RTW89_KCC][15] = 74,
+ [0][0][2][0][RTW89_ACMA][15] = 60,
+ [0][0][2][0][RTW89_CHILE][15] = 64,
+ [0][0][2][0][RTW89_UKRAINE][15] = 52,
+ [0][0][2][0][RTW89_MEXICO][15] = 74,
+ [0][0][2][0][RTW89_CN][15] = 127,
+ [0][0][2][0][RTW89_QATAR][15] = 60,
+ [0][0][2][0][RTW89_UK][15] = 60,
+ [0][0][2][0][RTW89_FCC][17] = 78,
+ [0][0][2][0][RTW89_ETSI][17] = 60,
+ [0][0][2][0][RTW89_MKK][17] = 76,
+ [0][0][2][0][RTW89_IC][17] = 78,
+ [0][0][2][0][RTW89_KCC][17] = 74,
+ [0][0][2][0][RTW89_ACMA][17] = 60,
+ [0][0][2][0][RTW89_CHILE][17] = 64,
+ [0][0][2][0][RTW89_UKRAINE][17] = 52,
+ [0][0][2][0][RTW89_MEXICO][17] = 78,
+ [0][0][2][0][RTW89_CN][17] = 127,
+ [0][0][2][0][RTW89_QATAR][17] = 60,
+ [0][0][2][0][RTW89_UK][17] = 60,
+ [0][0][2][0][RTW89_FCC][19] = 78,
+ [0][0][2][0][RTW89_ETSI][19] = 60,
+ [0][0][2][0][RTW89_MKK][19] = 76,
+ [0][0][2][0][RTW89_IC][19] = 78,
+ [0][0][2][0][RTW89_KCC][19] = 74,
+ [0][0][2][0][RTW89_ACMA][19] = 60,
+ [0][0][2][0][RTW89_CHILE][19] = 64,
+ [0][0][2][0][RTW89_UKRAINE][19] = 52,
+ [0][0][2][0][RTW89_MEXICO][19] = 78,
+ [0][0][2][0][RTW89_CN][19] = 127,
+ [0][0][2][0][RTW89_QATAR][19] = 60,
+ [0][0][2][0][RTW89_UK][19] = 60,
+ [0][0][2][0][RTW89_FCC][21] = 78,
+ [0][0][2][0][RTW89_ETSI][21] = 60,
+ [0][0][2][0][RTW89_MKK][21] = 76,
+ [0][0][2][0][RTW89_IC][21] = 78,
+ [0][0][2][0][RTW89_KCC][21] = 74,
+ [0][0][2][0][RTW89_ACMA][21] = 60,
+ [0][0][2][0][RTW89_CHILE][21] = 66,
+ [0][0][2][0][RTW89_UKRAINE][21] = 52,
+ [0][0][2][0][RTW89_MEXICO][21] = 78,
+ [0][0][2][0][RTW89_CN][21] = 127,
+ [0][0][2][0][RTW89_QATAR][21] = 60,
+ [0][0][2][0][RTW89_UK][21] = 60,
+ [0][0][2][0][RTW89_FCC][23] = 78,
+ [0][0][2][0][RTW89_ETSI][23] = 60,
+ [0][0][2][0][RTW89_MKK][23] = 76,
+ [0][0][2][0][RTW89_IC][23] = 78,
+ [0][0][2][0][RTW89_KCC][23] = 74,
+ [0][0][2][0][RTW89_ACMA][23] = 60,
+ [0][0][2][0][RTW89_CHILE][23] = 66,
+ [0][0][2][0][RTW89_UKRAINE][23] = 52,
+ [0][0][2][0][RTW89_MEXICO][23] = 78,
+ [0][0][2][0][RTW89_CN][23] = 127,
+ [0][0][2][0][RTW89_QATAR][23] = 60,
+ [0][0][2][0][RTW89_UK][23] = 60,
+ [0][0][2][0][RTW89_FCC][25] = 78,
+ [0][0][2][0][RTW89_ETSI][25] = 60,
+ [0][0][2][0][RTW89_MKK][25] = 76,
+ [0][0][2][0][RTW89_IC][25] = 127,
+ [0][0][2][0][RTW89_KCC][25] = 74,
+ [0][0][2][0][RTW89_ACMA][25] = 127,
+ [0][0][2][0][RTW89_CHILE][25] = 66,
+ [0][0][2][0][RTW89_UKRAINE][25] = 52,
+ [0][0][2][0][RTW89_MEXICO][25] = 78,
+ [0][0][2][0][RTW89_CN][25] = 127,
+ [0][0][2][0][RTW89_QATAR][25] = 60,
+ [0][0][2][0][RTW89_UK][25] = 60,
+ [0][0][2][0][RTW89_FCC][27] = 78,
+ [0][0][2][0][RTW89_ETSI][27] = 60,
+ [0][0][2][0][RTW89_MKK][27] = 76,
+ [0][0][2][0][RTW89_IC][27] = 127,
+ [0][0][2][0][RTW89_KCC][27] = 74,
+ [0][0][2][0][RTW89_ACMA][27] = 127,
+ [0][0][2][0][RTW89_CHILE][27] = 64,
+ [0][0][2][0][RTW89_UKRAINE][27] = 52,
+ [0][0][2][0][RTW89_MEXICO][27] = 78,
+ [0][0][2][0][RTW89_CN][27] = 127,
+ [0][0][2][0][RTW89_QATAR][27] = 60,
+ [0][0][2][0][RTW89_UK][27] = 60,
+ [0][0][2][0][RTW89_FCC][29] = 78,
+ [0][0][2][0][RTW89_ETSI][29] = 60,
+ [0][0][2][0][RTW89_MKK][29] = 76,
+ [0][0][2][0][RTW89_IC][29] = 127,
+ [0][0][2][0][RTW89_KCC][29] = 74,
+ [0][0][2][0][RTW89_ACMA][29] = 127,
+ [0][0][2][0][RTW89_CHILE][29] = 64,
+ [0][0][2][0][RTW89_UKRAINE][29] = 52,
+ [0][0][2][0][RTW89_MEXICO][29] = 78,
+ [0][0][2][0][RTW89_CN][29] = 127,
+ [0][0][2][0][RTW89_QATAR][29] = 60,
+ [0][0][2][0][RTW89_UK][29] = 60,
+ [0][0][2][0][RTW89_FCC][31] = 78,
+ [0][0][2][0][RTW89_ETSI][31] = 60,
+ [0][0][2][0][RTW89_MKK][31] = 76,
+ [0][0][2][0][RTW89_IC][31] = 78,
+ [0][0][2][0][RTW89_KCC][31] = 74,
+ [0][0][2][0][RTW89_ACMA][31] = 60,
+ [0][0][2][0][RTW89_CHILE][31] = 64,
+ [0][0][2][0][RTW89_UKRAINE][31] = 52,
+ [0][0][2][0][RTW89_MEXICO][31] = 78,
+ [0][0][2][0][RTW89_CN][31] = 127,
+ [0][0][2][0][RTW89_QATAR][31] = 60,
+ [0][0][2][0][RTW89_UK][31] = 60,
+ [0][0][2][0][RTW89_FCC][33] = 78,
+ [0][0][2][0][RTW89_ETSI][33] = 60,
+ [0][0][2][0][RTW89_MKK][33] = 76,
+ [0][0][2][0][RTW89_IC][33] = 78,
+ [0][0][2][0][RTW89_KCC][33] = 74,
+ [0][0][2][0][RTW89_ACMA][33] = 60,
+ [0][0][2][0][RTW89_CHILE][33] = 64,
+ [0][0][2][0][RTW89_UKRAINE][33] = 52,
+ [0][0][2][0][RTW89_MEXICO][33] = 78,
+ [0][0][2][0][RTW89_CN][33] = 127,
+ [0][0][2][0][RTW89_QATAR][33] = 60,
+ [0][0][2][0][RTW89_UK][33] = 60,
+ [0][0][2][0][RTW89_FCC][35] = 70,
+ [0][0][2][0][RTW89_ETSI][35] = 60,
+ [0][0][2][0][RTW89_MKK][35] = 76,
+ [0][0][2][0][RTW89_IC][35] = 70,
+ [0][0][2][0][RTW89_KCC][35] = 74,
+ [0][0][2][0][RTW89_ACMA][35] = 60,
+ [0][0][2][0][RTW89_CHILE][35] = 64,
+ [0][0][2][0][RTW89_UKRAINE][35] = 52,
+ [0][0][2][0][RTW89_MEXICO][35] = 70,
+ [0][0][2][0][RTW89_CN][35] = 127,
+ [0][0][2][0][RTW89_QATAR][35] = 60,
+ [0][0][2][0][RTW89_UK][35] = 60,
+ [0][0][2][0][RTW89_FCC][37] = 78,
+ [0][0][2][0][RTW89_ETSI][37] = 127,
+ [0][0][2][0][RTW89_MKK][37] = 76,
+ [0][0][2][0][RTW89_IC][37] = 78,
+ [0][0][2][0][RTW89_KCC][37] = 74,
+ [0][0][2][0][RTW89_ACMA][37] = 76,
+ [0][0][2][0][RTW89_CHILE][37] = 64,
+ [0][0][2][0][RTW89_UKRAINE][37] = 127,
+ [0][0][2][0][RTW89_MEXICO][37] = 78,
+ [0][0][2][0][RTW89_CN][37] = 127,
+ [0][0][2][0][RTW89_QATAR][37] = 127,
+ [0][0][2][0][RTW89_UK][37] = 74,
+ [0][0][2][0][RTW89_FCC][38] = 78,
+ [0][0][2][0][RTW89_ETSI][38] = 28,
+ [0][0][2][0][RTW89_MKK][38] = 127,
+ [0][0][2][0][RTW89_IC][38] = 78,
+ [0][0][2][0][RTW89_KCC][38] = 72,
+ [0][0][2][0][RTW89_ACMA][38] = 76,
+ [0][0][2][0][RTW89_CHILE][38] = 64,
+ [0][0][2][0][RTW89_UKRAINE][38] = 28,
+ [0][0][2][0][RTW89_MEXICO][38] = 78,
+ [0][0][2][0][RTW89_CN][38] = 76,
+ [0][0][2][0][RTW89_QATAR][38] = 28,
+ [0][0][2][0][RTW89_UK][38] = 60,
+ [0][0][2][0][RTW89_FCC][40] = 78,
+ [0][0][2][0][RTW89_ETSI][40] = 28,
+ [0][0][2][0][RTW89_MKK][40] = 127,
+ [0][0][2][0][RTW89_IC][40] = 78,
+ [0][0][2][0][RTW89_KCC][40] = 72,
+ [0][0][2][0][RTW89_ACMA][40] = 76,
+ [0][0][2][0][RTW89_CHILE][40] = 64,
+ [0][0][2][0][RTW89_UKRAINE][40] = 28,
+ [0][0][2][0][RTW89_MEXICO][40] = 78,
+ [0][0][2][0][RTW89_CN][40] = 76,
+ [0][0][2][0][RTW89_QATAR][40] = 28,
+ [0][0][2][0][RTW89_UK][40] = 60,
+ [0][0][2][0][RTW89_FCC][42] = 78,
+ [0][0][2][0][RTW89_ETSI][42] = 28,
+ [0][0][2][0][RTW89_MKK][42] = 127,
+ [0][0][2][0][RTW89_IC][42] = 78,
+ [0][0][2][0][RTW89_KCC][42] = 72,
+ [0][0][2][0][RTW89_ACMA][42] = 76,
+ [0][0][2][0][RTW89_CHILE][42] = 64,
+ [0][0][2][0][RTW89_UKRAINE][42] = 28,
+ [0][0][2][0][RTW89_MEXICO][42] = 78,
+ [0][0][2][0][RTW89_CN][42] = 76,
+ [0][0][2][0][RTW89_QATAR][42] = 28,
+ [0][0][2][0][RTW89_UK][42] = 60,
+ [0][0][2][0][RTW89_FCC][44] = 78,
+ [0][0][2][0][RTW89_ETSI][44] = 28,
+ [0][0][2][0][RTW89_MKK][44] = 127,
+ [0][0][2][0][RTW89_IC][44] = 78,
+ [0][0][2][0][RTW89_KCC][44] = 72,
+ [0][0][2][0][RTW89_ACMA][44] = 76,
+ [0][0][2][0][RTW89_CHILE][44] = 66,
+ [0][0][2][0][RTW89_UKRAINE][44] = 28,
+ [0][0][2][0][RTW89_MEXICO][44] = 78,
+ [0][0][2][0][RTW89_CN][44] = 76,
+ [0][0][2][0][RTW89_QATAR][44] = 28,
+ [0][0][2][0][RTW89_UK][44] = 60,
+ [0][0][2][0][RTW89_FCC][46] = 78,
+ [0][0][2][0][RTW89_ETSI][46] = 28,
+ [0][0][2][0][RTW89_MKK][46] = 127,
+ [0][0][2][0][RTW89_IC][46] = 78,
+ [0][0][2][0][RTW89_KCC][46] = 72,
+ [0][0][2][0][RTW89_ACMA][46] = 76,
+ [0][0][2][0][RTW89_CHILE][46] = 66,
+ [0][0][2][0][RTW89_UKRAINE][46] = 28,
+ [0][0][2][0][RTW89_MEXICO][46] = 78,
+ [0][0][2][0][RTW89_CN][46] = 76,
+ [0][0][2][0][RTW89_QATAR][46] = 28,
+ [0][0][2][0][RTW89_UK][46] = 60,
+ [0][0][2][0][RTW89_FCC][48] = 78,
+ [0][0][2][0][RTW89_ETSI][48] = 127,
+ [0][0][2][0][RTW89_MKK][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_KCC][48] = 127,
+ [0][0][2][0][RTW89_ACMA][48] = 127,
+ [0][0][2][0][RTW89_CHILE][48] = 127,
+ [0][0][2][0][RTW89_UKRAINE][48] = 127,
+ [0][0][2][0][RTW89_MEXICO][48] = 127,
+ [0][0][2][0][RTW89_CN][48] = 127,
+ [0][0][2][0][RTW89_QATAR][48] = 127,
+ [0][0][2][0][RTW89_UK][48] = 127,
+ [0][0][2][0][RTW89_FCC][50] = 78,
+ [0][0][2][0][RTW89_ETSI][50] = 127,
+ [0][0][2][0][RTW89_MKK][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_KCC][50] = 127,
+ [0][0][2][0][RTW89_ACMA][50] = 127,
+ [0][0][2][0][RTW89_CHILE][50] = 127,
+ [0][0][2][0][RTW89_UKRAINE][50] = 127,
+ [0][0][2][0][RTW89_MEXICO][50] = 127,
+ [0][0][2][0][RTW89_CN][50] = 127,
+ [0][0][2][0][RTW89_QATAR][50] = 127,
+ [0][0][2][0][RTW89_UK][50] = 127,
+ [0][0][2][0][RTW89_FCC][52] = 78,
+ [0][0][2][0][RTW89_ETSI][52] = 127,
+ [0][0][2][0][RTW89_MKK][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_KCC][52] = 127,
+ [0][0][2][0][RTW89_ACMA][52] = 127,
+ [0][0][2][0][RTW89_CHILE][52] = 127,
+ [0][0][2][0][RTW89_UKRAINE][52] = 127,
+ [0][0][2][0][RTW89_MEXICO][52] = 127,
+ [0][0][2][0][RTW89_CN][52] = 127,
+ [0][0][2][0][RTW89_QATAR][52] = 127,
+ [0][0][2][0][RTW89_UK][52] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 70,
+ [0][1][2][0][RTW89_ETSI][0] = 48,
+ [0][1][2][0][RTW89_MKK][0] = 50,
+ [0][1][2][0][RTW89_IC][0] = 42,
+ [0][1][2][0][RTW89_KCC][0] = 62,
+ [0][1][2][0][RTW89_ACMA][0] = 48,
+ [0][1][2][0][RTW89_CHILE][0] = 30,
+ [0][1][2][0][RTW89_UKRAINE][0] = 40,
+ [0][1][2][0][RTW89_MEXICO][0] = 50,
+ [0][1][2][0][RTW89_CN][0] = 48,
+ [0][1][2][0][RTW89_QATAR][0] = 48,
+ [0][1][2][0][RTW89_UK][0] = 48,
+ [0][1][2][0][RTW89_FCC][2] = 70,
+ [0][1][2][0][RTW89_ETSI][2] = 48,
+ [0][1][2][0][RTW89_MKK][2] = 50,
+ [0][1][2][0][RTW89_IC][2] = 42,
+ [0][1][2][0][RTW89_KCC][2] = 62,
+ [0][1][2][0][RTW89_ACMA][2] = 48,
+ [0][1][2][0][RTW89_CHILE][2] = 30,
+ [0][1][2][0][RTW89_UKRAINE][2] = 40,
+ [0][1][2][0][RTW89_MEXICO][2] = 50,
+ [0][1][2][0][RTW89_CN][2] = 48,
+ [0][1][2][0][RTW89_QATAR][2] = 48,
+ [0][1][2][0][RTW89_UK][2] = 48,
+ [0][1][2][0][RTW89_FCC][4] = 70,
+ [0][1][2][0][RTW89_ETSI][4] = 48,
+ [0][1][2][0][RTW89_MKK][4] = 50,
+ [0][1][2][0][RTW89_IC][4] = 42,
+ [0][1][2][0][RTW89_KCC][4] = 62,
+ [0][1][2][0][RTW89_ACMA][4] = 48,
+ [0][1][2][0][RTW89_CHILE][4] = 30,
+ [0][1][2][0][RTW89_UKRAINE][4] = 40,
+ [0][1][2][0][RTW89_MEXICO][4] = 50,
+ [0][1][2][0][RTW89_CN][4] = 48,
+ [0][1][2][0][RTW89_QATAR][4] = 48,
+ [0][1][2][0][RTW89_UK][4] = 48,
+ [0][1][2][0][RTW89_FCC][6] = 70,
+ [0][1][2][0][RTW89_ETSI][6] = 48,
+ [0][1][2][0][RTW89_MKK][6] = 50,
+ [0][1][2][0][RTW89_IC][6] = 42,
+ [0][1][2][0][RTW89_KCC][6] = 34,
+ [0][1][2][0][RTW89_ACMA][6] = 48,
+ [0][1][2][0][RTW89_CHILE][6] = 30,
+ [0][1][2][0][RTW89_UKRAINE][6] = 40,
+ [0][1][2][0][RTW89_MEXICO][6] = 50,
+ [0][1][2][0][RTW89_CN][6] = 48,
+ [0][1][2][0][RTW89_QATAR][6] = 48,
+ [0][1][2][0][RTW89_UK][6] = 48,
+ [0][1][2][0][RTW89_FCC][8] = 70,
+ [0][1][2][0][RTW89_ETSI][8] = 48,
+ [0][1][2][0][RTW89_MKK][8] = 50,
+ [0][1][2][0][RTW89_IC][8] = 52,
+ [0][1][2][0][RTW89_KCC][8] = 62,
+ [0][1][2][0][RTW89_ACMA][8] = 48,
+ [0][1][2][0][RTW89_CHILE][8] = 50,
+ [0][1][2][0][RTW89_UKRAINE][8] = 40,
+ [0][1][2][0][RTW89_MEXICO][8] = 70,
+ [0][1][2][0][RTW89_CN][8] = 48,
+ [0][1][2][0][RTW89_QATAR][8] = 48,
+ [0][1][2][0][RTW89_UK][8] = 48,
+ [0][1][2][0][RTW89_FCC][10] = 70,
+ [0][1][2][0][RTW89_ETSI][10] = 48,
+ [0][1][2][0][RTW89_MKK][10] = 50,
+ [0][1][2][0][RTW89_IC][10] = 52,
+ [0][1][2][0][RTW89_KCC][10] = 62,
+ [0][1][2][0][RTW89_ACMA][10] = 48,
+ [0][1][2][0][RTW89_CHILE][10] = 50,
+ [0][1][2][0][RTW89_UKRAINE][10] = 40,
+ [0][1][2][0][RTW89_MEXICO][10] = 70,
+ [0][1][2][0][RTW89_CN][10] = 48,
+ [0][1][2][0][RTW89_QATAR][10] = 48,
+ [0][1][2][0][RTW89_UK][10] = 48,
+ [0][1][2][0][RTW89_FCC][12] = 70,
+ [0][1][2][0][RTW89_ETSI][12] = 48,
+ [0][1][2][0][RTW89_MKK][12] = 50,
+ [0][1][2][0][RTW89_IC][12] = 52,
+ [0][1][2][0][RTW89_KCC][12] = 62,
+ [0][1][2][0][RTW89_ACMA][12] = 48,
+ [0][1][2][0][RTW89_CHILE][12] = 50,
+ [0][1][2][0][RTW89_UKRAINE][12] = 40,
+ [0][1][2][0][RTW89_MEXICO][12] = 70,
+ [0][1][2][0][RTW89_CN][12] = 48,
+ [0][1][2][0][RTW89_QATAR][12] = 48,
+ [0][1][2][0][RTW89_UK][12] = 48,
+ [0][1][2][0][RTW89_FCC][14] = 70,
+ [0][1][2][0][RTW89_ETSI][14] = 48,
+ [0][1][2][0][RTW89_MKK][14] = 50,
+ [0][1][2][0][RTW89_IC][14] = 52,
+ [0][1][2][0][RTW89_KCC][14] = 62,
+ [0][1][2][0][RTW89_ACMA][14] = 48,
+ [0][1][2][0][RTW89_CHILE][14] = 50,
+ [0][1][2][0][RTW89_UKRAINE][14] = 40,
+ [0][1][2][0][RTW89_MEXICO][14] = 70,
+ [0][1][2][0][RTW89_CN][14] = 48,
+ [0][1][2][0][RTW89_QATAR][14] = 48,
+ [0][1][2][0][RTW89_UK][14] = 48,
+ [0][1][2][0][RTW89_FCC][15] = 68,
+ [0][1][2][0][RTW89_ETSI][15] = 48,
+ [0][1][2][0][RTW89_MKK][15] = 70,
+ [0][1][2][0][RTW89_IC][15] = 68,
+ [0][1][2][0][RTW89_KCC][15] = 62,
+ [0][1][2][0][RTW89_ACMA][15] = 48,
+ [0][1][2][0][RTW89_CHILE][15] = 48,
+ [0][1][2][0][RTW89_UKRAINE][15] = 40,
+ [0][1][2][0][RTW89_MEXICO][15] = 68,
+ [0][1][2][0][RTW89_CN][15] = 127,
+ [0][1][2][0][RTW89_QATAR][15] = 48,
+ [0][1][2][0][RTW89_UK][15] = 48,
+ [0][1][2][0][RTW89_FCC][17] = 70,
+ [0][1][2][0][RTW89_ETSI][17] = 48,
+ [0][1][2][0][RTW89_MKK][17] = 70,
+ [0][1][2][0][RTW89_IC][17] = 70,
+ [0][1][2][0][RTW89_KCC][17] = 62,
+ [0][1][2][0][RTW89_ACMA][17] = 48,
+ [0][1][2][0][RTW89_CHILE][17] = 48,
+ [0][1][2][0][RTW89_UKRAINE][17] = 40,
+ [0][1][2][0][RTW89_MEXICO][17] = 70,
+ [0][1][2][0][RTW89_CN][17] = 127,
+ [0][1][2][0][RTW89_QATAR][17] = 48,
+ [0][1][2][0][RTW89_UK][17] = 48,
+ [0][1][2][0][RTW89_FCC][19] = 70,
+ [0][1][2][0][RTW89_ETSI][19] = 48,
+ [0][1][2][0][RTW89_MKK][19] = 70,
+ [0][1][2][0][RTW89_IC][19] = 70,
+ [0][1][2][0][RTW89_KCC][19] = 62,
+ [0][1][2][0][RTW89_ACMA][19] = 48,
+ [0][1][2][0][RTW89_CHILE][19] = 48,
+ [0][1][2][0][RTW89_UKRAINE][19] = 40,
+ [0][1][2][0][RTW89_MEXICO][19] = 70,
+ [0][1][2][0][RTW89_CN][19] = 127,
+ [0][1][2][0][RTW89_QATAR][19] = 48,
+ [0][1][2][0][RTW89_UK][19] = 48,
+ [0][1][2][0][RTW89_FCC][21] = 70,
+ [0][1][2][0][RTW89_ETSI][21] = 48,
+ [0][1][2][0][RTW89_MKK][21] = 70,
+ [0][1][2][0][RTW89_IC][21] = 70,
+ [0][1][2][0][RTW89_KCC][21] = 62,
+ [0][1][2][0][RTW89_ACMA][21] = 48,
+ [0][1][2][0][RTW89_CHILE][21] = 48,
+ [0][1][2][0][RTW89_UKRAINE][21] = 40,
+ [0][1][2][0][RTW89_MEXICO][21] = 70,
+ [0][1][2][0][RTW89_CN][21] = 127,
+ [0][1][2][0][RTW89_QATAR][21] = 48,
+ [0][1][2][0][RTW89_UK][21] = 48,
+ [0][1][2][0][RTW89_FCC][23] = 70,
+ [0][1][2][0][RTW89_ETSI][23] = 48,
+ [0][1][2][0][RTW89_MKK][23] = 70,
+ [0][1][2][0][RTW89_IC][23] = 70,
+ [0][1][2][0][RTW89_KCC][23] = 62,
+ [0][1][2][0][RTW89_ACMA][23] = 48,
+ [0][1][2][0][RTW89_CHILE][23] = 48,
+ [0][1][2][0][RTW89_UKRAINE][23] = 40,
+ [0][1][2][0][RTW89_MEXICO][23] = 70,
+ [0][1][2][0][RTW89_CN][23] = 127,
+ [0][1][2][0][RTW89_QATAR][23] = 48,
+ [0][1][2][0][RTW89_UK][23] = 48,
+ [0][1][2][0][RTW89_FCC][25] = 70,
+ [0][1][2][0][RTW89_ETSI][25] = 48,
+ [0][1][2][0][RTW89_MKK][25] = 70,
+ [0][1][2][0][RTW89_IC][25] = 127,
+ [0][1][2][0][RTW89_KCC][25] = 62,
+ [0][1][2][0][RTW89_ACMA][25] = 127,
+ [0][1][2][0][RTW89_CHILE][25] = 48,
+ [0][1][2][0][RTW89_UKRAINE][25] = 40,
+ [0][1][2][0][RTW89_MEXICO][25] = 70,
+ [0][1][2][0][RTW89_CN][25] = 127,
+ [0][1][2][0][RTW89_QATAR][25] = 48,
+ [0][1][2][0][RTW89_UK][25] = 48,
+ [0][1][2][0][RTW89_FCC][27] = 70,
+ [0][1][2][0][RTW89_ETSI][27] = 48,
+ [0][1][2][0][RTW89_MKK][27] = 70,
+ [0][1][2][0][RTW89_IC][27] = 127,
+ [0][1][2][0][RTW89_KCC][27] = 62,
+ [0][1][2][0][RTW89_ACMA][27] = 127,
+ [0][1][2][0][RTW89_CHILE][27] = 50,
+ [0][1][2][0][RTW89_UKRAINE][27] = 40,
+ [0][1][2][0][RTW89_MEXICO][27] = 70,
+ [0][1][2][0][RTW89_CN][27] = 127,
+ [0][1][2][0][RTW89_QATAR][27] = 48,
+ [0][1][2][0][RTW89_UK][27] = 48,
+ [0][1][2][0][RTW89_FCC][29] = 70,
+ [0][1][2][0][RTW89_ETSI][29] = 48,
+ [0][1][2][0][RTW89_MKK][29] = 70,
+ [0][1][2][0][RTW89_IC][29] = 127,
+ [0][1][2][0][RTW89_KCC][29] = 62,
+ [0][1][2][0][RTW89_ACMA][29] = 127,
+ [0][1][2][0][RTW89_CHILE][29] = 50,
+ [0][1][2][0][RTW89_UKRAINE][29] = 40,
+ [0][1][2][0][RTW89_MEXICO][29] = 70,
+ [0][1][2][0][RTW89_CN][29] = 127,
+ [0][1][2][0][RTW89_QATAR][29] = 48,
+ [0][1][2][0][RTW89_UK][29] = 48,
+ [0][1][2][0][RTW89_FCC][31] = 70,
+ [0][1][2][0][RTW89_ETSI][31] = 48,
+ [0][1][2][0][RTW89_MKK][31] = 70,
+ [0][1][2][0][RTW89_IC][31] = 70,
+ [0][1][2][0][RTW89_KCC][31] = 62,
+ [0][1][2][0][RTW89_ACMA][31] = 48,
+ [0][1][2][0][RTW89_CHILE][31] = 50,
+ [0][1][2][0][RTW89_UKRAINE][31] = 40,
+ [0][1][2][0][RTW89_MEXICO][31] = 70,
+ [0][1][2][0][RTW89_CN][31] = 127,
+ [0][1][2][0][RTW89_QATAR][31] = 48,
+ [0][1][2][0][RTW89_UK][31] = 48,
+ [0][1][2][0][RTW89_FCC][33] = 70,
+ [0][1][2][0][RTW89_ETSI][33] = 48,
+ [0][1][2][0][RTW89_MKK][33] = 70,
+ [0][1][2][0][RTW89_IC][33] = 70,
+ [0][1][2][0][RTW89_KCC][33] = 62,
+ [0][1][2][0][RTW89_ACMA][33] = 48,
+ [0][1][2][0][RTW89_CHILE][33] = 50,
+ [0][1][2][0][RTW89_UKRAINE][33] = 40,
+ [0][1][2][0][RTW89_MEXICO][33] = 70,
+ [0][1][2][0][RTW89_CN][33] = 127,
+ [0][1][2][0][RTW89_QATAR][33] = 48,
+ [0][1][2][0][RTW89_UK][33] = 48,
+ [0][1][2][0][RTW89_FCC][35] = 66,
+ [0][1][2][0][RTW89_ETSI][35] = 48,
+ [0][1][2][0][RTW89_MKK][35] = 70,
+ [0][1][2][0][RTW89_IC][35] = 66,
+ [0][1][2][0][RTW89_KCC][35] = 62,
+ [0][1][2][0][RTW89_ACMA][35] = 48,
+ [0][1][2][0][RTW89_CHILE][35] = 50,
+ [0][1][2][0][RTW89_UKRAINE][35] = 40,
+ [0][1][2][0][RTW89_MEXICO][35] = 66,
+ [0][1][2][0][RTW89_CN][35] = 127,
+ [0][1][2][0][RTW89_QATAR][35] = 48,
+ [0][1][2][0][RTW89_UK][35] = 48,
+ [0][1][2][0][RTW89_FCC][37] = 70,
+ [0][1][2][0][RTW89_ETSI][37] = 127,
+ [0][1][2][0][RTW89_MKK][37] = 70,
+ [0][1][2][0][RTW89_IC][37] = 70,
+ [0][1][2][0][RTW89_KCC][37] = 62,
+ [0][1][2][0][RTW89_ACMA][37] = 70,
+ [0][1][2][0][RTW89_CHILE][37] = 50,
+ [0][1][2][0][RTW89_UKRAINE][37] = 127,
+ [0][1][2][0][RTW89_MEXICO][37] = 70,
+ [0][1][2][0][RTW89_CN][37] = 127,
+ [0][1][2][0][RTW89_QATAR][37] = 127,
+ [0][1][2][0][RTW89_UK][37] = 76,
+ [0][1][2][0][RTW89_FCC][38] = 78,
+ [0][1][2][0][RTW89_ETSI][38] = 16,
+ [0][1][2][0][RTW89_MKK][38] = 127,
+ [0][1][2][0][RTW89_IC][38] = 78,
+ [0][1][2][0][RTW89_KCC][38] = 62,
+ [0][1][2][0][RTW89_ACMA][38] = 74,
+ [0][1][2][0][RTW89_CHILE][38] = 50,
+ [0][1][2][0][RTW89_UKRAINE][38] = 16,
+ [0][1][2][0][RTW89_MEXICO][38] = 78,
+ [0][1][2][0][RTW89_CN][38] = 76,
+ [0][1][2][0][RTW89_QATAR][38] = 16,
+ [0][1][2][0][RTW89_UK][38] = 48,
+ [0][1][2][0][RTW89_FCC][40] = 78,
+ [0][1][2][0][RTW89_ETSI][40] = 16,
+ [0][1][2][0][RTW89_MKK][40] = 127,
+ [0][1][2][0][RTW89_IC][40] = 78,
+ [0][1][2][0][RTW89_KCC][40] = 62,
+ [0][1][2][0][RTW89_ACMA][40] = 74,
+ [0][1][2][0][RTW89_CHILE][40] = 50,
+ [0][1][2][0][RTW89_UKRAINE][40] = 16,
+ [0][1][2][0][RTW89_MEXICO][40] = 78,
+ [0][1][2][0][RTW89_CN][40] = 76,
+ [0][1][2][0][RTW89_QATAR][40] = 16,
+ [0][1][2][0][RTW89_UK][40] = 48,
+ [0][1][2][0][RTW89_FCC][42] = 78,
+ [0][1][2][0][RTW89_ETSI][42] = 16,
+ [0][1][2][0][RTW89_MKK][42] = 127,
+ [0][1][2][0][RTW89_IC][42] = 78,
+ [0][1][2][0][RTW89_KCC][42] = 62,
+ [0][1][2][0][RTW89_ACMA][42] = 76,
+ [0][1][2][0][RTW89_CHILE][42] = 52,
+ [0][1][2][0][RTW89_UKRAINE][42] = 16,
+ [0][1][2][0][RTW89_MEXICO][42] = 78,
+ [0][1][2][0][RTW89_CN][42] = 76,
+ [0][1][2][0][RTW89_QATAR][42] = 16,
+ [0][1][2][0][RTW89_UK][42] = 48,
+ [0][1][2][0][RTW89_FCC][44] = 78,
+ [0][1][2][0][RTW89_ETSI][44] = 16,
+ [0][1][2][0][RTW89_MKK][44] = 127,
+ [0][1][2][0][RTW89_IC][44] = 78,
+ [0][1][2][0][RTW89_KCC][44] = 62,
+ [0][1][2][0][RTW89_ACMA][44] = 76,
+ [0][1][2][0][RTW89_CHILE][44] = 52,
+ [0][1][2][0][RTW89_UKRAINE][44] = 16,
+ [0][1][2][0][RTW89_MEXICO][44] = 78,
+ [0][1][2][0][RTW89_CN][44] = 76,
+ [0][1][2][0][RTW89_QATAR][44] = 16,
+ [0][1][2][0][RTW89_UK][44] = 48,
+ [0][1][2][0][RTW89_FCC][46] = 78,
+ [0][1][2][0][RTW89_ETSI][46] = 16,
+ [0][1][2][0][RTW89_MKK][46] = 127,
+ [0][1][2][0][RTW89_IC][46] = 78,
+ [0][1][2][0][RTW89_KCC][46] = 62,
+ [0][1][2][0][RTW89_ACMA][46] = 76,
+ [0][1][2][0][RTW89_CHILE][46] = 52,
+ [0][1][2][0][RTW89_UKRAINE][46] = 16,
+ [0][1][2][0][RTW89_MEXICO][46] = 78,
+ [0][1][2][0][RTW89_CN][46] = 76,
+ [0][1][2][0][RTW89_QATAR][46] = 16,
+ [0][1][2][0][RTW89_UK][46] = 48,
+ [0][1][2][0][RTW89_FCC][48] = 58,
+ [0][1][2][0][RTW89_ETSI][48] = 127,
+ [0][1][2][0][RTW89_MKK][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_KCC][48] = 127,
+ [0][1][2][0][RTW89_ACMA][48] = 127,
+ [0][1][2][0][RTW89_CHILE][48] = 127,
+ [0][1][2][0][RTW89_UKRAINE][48] = 127,
+ [0][1][2][0][RTW89_MEXICO][48] = 127,
+ [0][1][2][0][RTW89_CN][48] = 127,
+ [0][1][2][0][RTW89_QATAR][48] = 127,
+ [0][1][2][0][RTW89_UK][48] = 127,
+ [0][1][2][0][RTW89_FCC][50] = 58,
+ [0][1][2][0][RTW89_ETSI][50] = 127,
+ [0][1][2][0][RTW89_MKK][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_KCC][50] = 127,
+ [0][1][2][0][RTW89_ACMA][50] = 127,
+ [0][1][2][0][RTW89_CHILE][50] = 127,
+ [0][1][2][0][RTW89_UKRAINE][50] = 127,
+ [0][1][2][0][RTW89_MEXICO][50] = 127,
+ [0][1][2][0][RTW89_CN][50] = 127,
+ [0][1][2][0][RTW89_QATAR][50] = 127,
+ [0][1][2][0][RTW89_UK][50] = 127,
+ [0][1][2][0][RTW89_FCC][52] = 58,
+ [0][1][2][0][RTW89_ETSI][52] = 127,
+ [0][1][2][0][RTW89_MKK][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_KCC][52] = 127,
+ [0][1][2][0][RTW89_ACMA][52] = 127,
+ [0][1][2][0][RTW89_CHILE][52] = 127,
+ [0][1][2][0][RTW89_UKRAINE][52] = 127,
+ [0][1][2][0][RTW89_MEXICO][52] = 127,
+ [0][1][2][0][RTW89_CN][52] = 127,
+ [0][1][2][0][RTW89_QATAR][52] = 127,
+ [0][1][2][0][RTW89_UK][52] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 68,
+ [0][1][2][1][RTW89_ETSI][0] = 36,
+ [0][1][2][1][RTW89_MKK][0] = 50,
+ [0][1][2][1][RTW89_IC][0] = 40,
+ [0][1][2][1][RTW89_KCC][0] = 62,
+ [0][1][2][1][RTW89_ACMA][0] = 36,
+ [0][1][2][1][RTW89_CHILE][0] = 14,
+ [0][1][2][1][RTW89_UKRAINE][0] = 28,
+ [0][1][2][1][RTW89_MEXICO][0] = 50,
+ [0][1][2][1][RTW89_CN][0] = 36,
+ [0][1][2][1][RTW89_QATAR][0] = 36,
+ [0][1][2][1][RTW89_UK][0] = 36,
+ [0][1][2][1][RTW89_FCC][2] = 68,
+ [0][1][2][1][RTW89_ETSI][2] = 36,
+ [0][1][2][1][RTW89_MKK][2] = 50,
+ [0][1][2][1][RTW89_IC][2] = 40,
+ [0][1][2][1][RTW89_KCC][2] = 62,
+ [0][1][2][1][RTW89_ACMA][2] = 36,
+ [0][1][2][1][RTW89_CHILE][2] = 14,
+ [0][1][2][1][RTW89_UKRAINE][2] = 28,
+ [0][1][2][1][RTW89_MEXICO][2] = 50,
+ [0][1][2][1][RTW89_CN][2] = 36,
+ [0][1][2][1][RTW89_QATAR][2] = 36,
+ [0][1][2][1][RTW89_UK][2] = 36,
+ [0][1][2][1][RTW89_FCC][4] = 68,
+ [0][1][2][1][RTW89_ETSI][4] = 36,
+ [0][1][2][1][RTW89_MKK][4] = 50,
+ [0][1][2][1][RTW89_IC][4] = 40,
+ [0][1][2][1][RTW89_KCC][4] = 62,
+ [0][1][2][1][RTW89_ACMA][4] = 36,
+ [0][1][2][1][RTW89_CHILE][4] = 14,
+ [0][1][2][1][RTW89_UKRAINE][4] = 28,
+ [0][1][2][1][RTW89_MEXICO][4] = 50,
+ [0][1][2][1][RTW89_CN][4] = 36,
+ [0][1][2][1][RTW89_QATAR][4] = 36,
+ [0][1][2][1][RTW89_UK][4] = 36,
+ [0][1][2][1][RTW89_FCC][6] = 68,
+ [0][1][2][1][RTW89_ETSI][6] = 36,
+ [0][1][2][1][RTW89_MKK][6] = 50,
+ [0][1][2][1][RTW89_IC][6] = 40,
+ [0][1][2][1][RTW89_KCC][6] = 34,
+ [0][1][2][1][RTW89_ACMA][6] = 36,
+ [0][1][2][1][RTW89_CHILE][6] = 14,
+ [0][1][2][1][RTW89_UKRAINE][6] = 28,
+ [0][1][2][1][RTW89_MEXICO][6] = 50,
+ [0][1][2][1][RTW89_CN][6] = 36,
+ [0][1][2][1][RTW89_QATAR][6] = 36,
+ [0][1][2][1][RTW89_UK][6] = 36,
+ [0][1][2][1][RTW89_FCC][8] = 68,
+ [0][1][2][1][RTW89_ETSI][8] = 36,
+ [0][1][2][1][RTW89_MKK][8] = 50,
+ [0][1][2][1][RTW89_IC][8] = 40,
+ [0][1][2][1][RTW89_KCC][8] = 62,
+ [0][1][2][1][RTW89_ACMA][8] = 36,
+ [0][1][2][1][RTW89_CHILE][8] = 36,
+ [0][1][2][1][RTW89_UKRAINE][8] = 28,
+ [0][1][2][1][RTW89_MEXICO][8] = 68,
+ [0][1][2][1][RTW89_CN][8] = 36,
+ [0][1][2][1][RTW89_QATAR][8] = 36,
+ [0][1][2][1][RTW89_UK][8] = 36,
+ [0][1][2][1][RTW89_FCC][10] = 68,
+ [0][1][2][1][RTW89_ETSI][10] = 36,
+ [0][1][2][1][RTW89_MKK][10] = 50,
+ [0][1][2][1][RTW89_IC][10] = 40,
+ [0][1][2][1][RTW89_KCC][10] = 62,
+ [0][1][2][1][RTW89_ACMA][10] = 36,
+ [0][1][2][1][RTW89_CHILE][10] = 36,
+ [0][1][2][1][RTW89_UKRAINE][10] = 28,
+ [0][1][2][1][RTW89_MEXICO][10] = 68,
+ [0][1][2][1][RTW89_CN][10] = 36,
+ [0][1][2][1][RTW89_QATAR][10] = 36,
+ [0][1][2][1][RTW89_UK][10] = 36,
+ [0][1][2][1][RTW89_FCC][12] = 68,
+ [0][1][2][1][RTW89_ETSI][12] = 36,
+ [0][1][2][1][RTW89_MKK][12] = 50,
+ [0][1][2][1][RTW89_IC][12] = 40,
+ [0][1][2][1][RTW89_KCC][12] = 62,
+ [0][1][2][1][RTW89_ACMA][12] = 36,
+ [0][1][2][1][RTW89_CHILE][12] = 36,
+ [0][1][2][1][RTW89_UKRAINE][12] = 28,
+ [0][1][2][1][RTW89_MEXICO][12] = 68,
+ [0][1][2][1][RTW89_CN][12] = 36,
+ [0][1][2][1][RTW89_QATAR][12] = 36,
+ [0][1][2][1][RTW89_UK][12] = 36,
+ [0][1][2][1][RTW89_FCC][14] = 68,
+ [0][1][2][1][RTW89_ETSI][14] = 36,
+ [0][1][2][1][RTW89_MKK][14] = 50,
+ [0][1][2][1][RTW89_IC][14] = 40,
+ [0][1][2][1][RTW89_KCC][14] = 62,
+ [0][1][2][1][RTW89_ACMA][14] = 36,
+ [0][1][2][1][RTW89_CHILE][14] = 36,
+ [0][1][2][1][RTW89_UKRAINE][14] = 28,
+ [0][1][2][1][RTW89_MEXICO][14] = 68,
+ [0][1][2][1][RTW89_CN][14] = 36,
+ [0][1][2][1][RTW89_QATAR][14] = 36,
+ [0][1][2][1][RTW89_UK][14] = 36,
+ [0][1][2][1][RTW89_FCC][15] = 68,
+ [0][1][2][1][RTW89_ETSI][15] = 36,
+ [0][1][2][1][RTW89_MKK][15] = 70,
+ [0][1][2][1][RTW89_IC][15] = 68,
+ [0][1][2][1][RTW89_KCC][15] = 62,
+ [0][1][2][1][RTW89_ACMA][15] = 36,
+ [0][1][2][1][RTW89_CHILE][15] = 36,
+ [0][1][2][1][RTW89_UKRAINE][15] = 28,
+ [0][1][2][1][RTW89_MEXICO][15] = 68,
+ [0][1][2][1][RTW89_CN][15] = 127,
+ [0][1][2][1][RTW89_QATAR][15] = 36,
+ [0][1][2][1][RTW89_UK][15] = 36,
+ [0][1][2][1][RTW89_FCC][17] = 68,
+ [0][1][2][1][RTW89_ETSI][17] = 36,
+ [0][1][2][1][RTW89_MKK][17] = 70,
+ [0][1][2][1][RTW89_IC][17] = 68,
+ [0][1][2][1][RTW89_KCC][17] = 62,
+ [0][1][2][1][RTW89_ACMA][17] = 36,
+ [0][1][2][1][RTW89_CHILE][17] = 36,
+ [0][1][2][1][RTW89_UKRAINE][17] = 28,
+ [0][1][2][1][RTW89_MEXICO][17] = 68,
+ [0][1][2][1][RTW89_CN][17] = 127,
+ [0][1][2][1][RTW89_QATAR][17] = 36,
+ [0][1][2][1][RTW89_UK][17] = 36,
+ [0][1][2][1][RTW89_FCC][19] = 68,
+ [0][1][2][1][RTW89_ETSI][19] = 36,
+ [0][1][2][1][RTW89_MKK][19] = 70,
+ [0][1][2][1][RTW89_IC][19] = 68,
+ [0][1][2][1][RTW89_KCC][19] = 62,
+ [0][1][2][1][RTW89_ACMA][19] = 36,
+ [0][1][2][1][RTW89_CHILE][19] = 36,
+ [0][1][2][1][RTW89_UKRAINE][19] = 28,
+ [0][1][2][1][RTW89_MEXICO][19] = 68,
+ [0][1][2][1][RTW89_CN][19] = 127,
+ [0][1][2][1][RTW89_QATAR][19] = 36,
+ [0][1][2][1][RTW89_UK][19] = 36,
+ [0][1][2][1][RTW89_FCC][21] = 68,
+ [0][1][2][1][RTW89_ETSI][21] = 36,
+ [0][1][2][1][RTW89_MKK][21] = 70,
+ [0][1][2][1][RTW89_IC][21] = 68,
+ [0][1][2][1][RTW89_KCC][21] = 62,
+ [0][1][2][1][RTW89_ACMA][21] = 36,
+ [0][1][2][1][RTW89_CHILE][21] = 36,
+ [0][1][2][1][RTW89_UKRAINE][21] = 28,
+ [0][1][2][1][RTW89_MEXICO][21] = 68,
+ [0][1][2][1][RTW89_CN][21] = 127,
+ [0][1][2][1][RTW89_QATAR][21] = 36,
+ [0][1][2][1][RTW89_UK][21] = 36,
+ [0][1][2][1][RTW89_FCC][23] = 68,
+ [0][1][2][1][RTW89_ETSI][23] = 36,
+ [0][1][2][1][RTW89_MKK][23] = 70,
+ [0][1][2][1][RTW89_IC][23] = 68,
+ [0][1][2][1][RTW89_KCC][23] = 62,
+ [0][1][2][1][RTW89_ACMA][23] = 36,
+ [0][1][2][1][RTW89_CHILE][23] = 36,
+ [0][1][2][1][RTW89_UKRAINE][23] = 28,
+ [0][1][2][1][RTW89_MEXICO][23] = 68,
+ [0][1][2][1][RTW89_CN][23] = 127,
+ [0][1][2][1][RTW89_QATAR][23] = 36,
+ [0][1][2][1][RTW89_UK][23] = 36,
+ [0][1][2][1][RTW89_FCC][25] = 66,
+ [0][1][2][1][RTW89_ETSI][25] = 36,
+ [0][1][2][1][RTW89_MKK][25] = 70,
+ [0][1][2][1][RTW89_IC][25] = 127,
+ [0][1][2][1][RTW89_KCC][25] = 62,
+ [0][1][2][1][RTW89_ACMA][25] = 127,
+ [0][1][2][1][RTW89_CHILE][25] = 36,
+ [0][1][2][1][RTW89_UKRAINE][25] = 28,
+ [0][1][2][1][RTW89_MEXICO][25] = 66,
+ [0][1][2][1][RTW89_CN][25] = 127,
+ [0][1][2][1][RTW89_QATAR][25] = 36,
+ [0][1][2][1][RTW89_UK][25] = 36,
+ [0][1][2][1][RTW89_FCC][27] = 66,
+ [0][1][2][1][RTW89_ETSI][27] = 36,
+ [0][1][2][1][RTW89_MKK][27] = 70,
+ [0][1][2][1][RTW89_IC][27] = 127,
+ [0][1][2][1][RTW89_KCC][27] = 62,
+ [0][1][2][1][RTW89_ACMA][27] = 127,
+ [0][1][2][1][RTW89_CHILE][27] = 36,
+ [0][1][2][1][RTW89_UKRAINE][27] = 28,
+ [0][1][2][1][RTW89_MEXICO][27] = 66,
+ [0][1][2][1][RTW89_CN][27] = 127,
+ [0][1][2][1][RTW89_QATAR][27] = 36,
+ [0][1][2][1][RTW89_UK][27] = 36,
+ [0][1][2][1][RTW89_FCC][29] = 66,
+ [0][1][2][1][RTW89_ETSI][29] = 36,
+ [0][1][2][1][RTW89_MKK][29] = 70,
+ [0][1][2][1][RTW89_IC][29] = 127,
+ [0][1][2][1][RTW89_KCC][29] = 62,
+ [0][1][2][1][RTW89_ACMA][29] = 127,
+ [0][1][2][1][RTW89_CHILE][29] = 36,
+ [0][1][2][1][RTW89_UKRAINE][29] = 28,
+ [0][1][2][1][RTW89_MEXICO][29] = 66,
+ [0][1][2][1][RTW89_CN][29] = 127,
+ [0][1][2][1][RTW89_QATAR][29] = 36,
+ [0][1][2][1][RTW89_UK][29] = 36,
+ [0][1][2][1][RTW89_FCC][31] = 66,
+ [0][1][2][1][RTW89_ETSI][31] = 36,
+ [0][1][2][1][RTW89_MKK][31] = 70,
+ [0][1][2][1][RTW89_IC][31] = 66,
+ [0][1][2][1][RTW89_KCC][31] = 62,
+ [0][1][2][1][RTW89_ACMA][31] = 36,
+ [0][1][2][1][RTW89_CHILE][31] = 36,
+ [0][1][2][1][RTW89_UKRAINE][31] = 28,
+ [0][1][2][1][RTW89_MEXICO][31] = 66,
+ [0][1][2][1][RTW89_CN][31] = 127,
+ [0][1][2][1][RTW89_QATAR][31] = 36,
+ [0][1][2][1][RTW89_UK][31] = 36,
+ [0][1][2][1][RTW89_FCC][33] = 66,
+ [0][1][2][1][RTW89_ETSI][33] = 36,
+ [0][1][2][1][RTW89_MKK][33] = 70,
+ [0][1][2][1][RTW89_IC][33] = 66,
+ [0][1][2][1][RTW89_KCC][33] = 62,
+ [0][1][2][1][RTW89_ACMA][33] = 36,
+ [0][1][2][1][RTW89_CHILE][33] = 36,
+ [0][1][2][1][RTW89_UKRAINE][33] = 28,
+ [0][1][2][1][RTW89_MEXICO][33] = 66,
+ [0][1][2][1][RTW89_CN][33] = 127,
+ [0][1][2][1][RTW89_QATAR][33] = 36,
+ [0][1][2][1][RTW89_UK][33] = 36,
+ [0][1][2][1][RTW89_FCC][35] = 66,
+ [0][1][2][1][RTW89_ETSI][35] = 36,
+ [0][1][2][1][RTW89_MKK][35] = 70,
+ [0][1][2][1][RTW89_IC][35] = 66,
+ [0][1][2][1][RTW89_KCC][35] = 62,
+ [0][1][2][1][RTW89_ACMA][35] = 36,
+ [0][1][2][1][RTW89_CHILE][35] = 36,
+ [0][1][2][1][RTW89_UKRAINE][35] = 28,
+ [0][1][2][1][RTW89_MEXICO][35] = 66,
+ [0][1][2][1][RTW89_CN][35] = 127,
+ [0][1][2][1][RTW89_QATAR][35] = 36,
+ [0][1][2][1][RTW89_UK][35] = 36,
+ [0][1][2][1][RTW89_FCC][37] = 68,
+ [0][1][2][1][RTW89_ETSI][37] = 127,
+ [0][1][2][1][RTW89_MKK][37] = 70,
+ [0][1][2][1][RTW89_IC][37] = 68,
+ [0][1][2][1][RTW89_KCC][37] = 62,
+ [0][1][2][1][RTW89_ACMA][37] = 70,
+ [0][1][2][1][RTW89_CHILE][37] = 36,
+ [0][1][2][1][RTW89_UKRAINE][37] = 127,
+ [0][1][2][1][RTW89_MEXICO][37] = 68,
+ [0][1][2][1][RTW89_CN][37] = 127,
+ [0][1][2][1][RTW89_QATAR][37] = 127,
+ [0][1][2][1][RTW89_UK][37] = 62,
+ [0][1][2][1][RTW89_FCC][38] = 78,
+ [0][1][2][1][RTW89_ETSI][38] = 4,
+ [0][1][2][1][RTW89_MKK][38] = 127,
+ [0][1][2][1][RTW89_IC][38] = 78,
+ [0][1][2][1][RTW89_KCC][38] = 62,
+ [0][1][2][1][RTW89_ACMA][38] = 74,
+ [0][1][2][1][RTW89_CHILE][38] = 36,
+ [0][1][2][1][RTW89_UKRAINE][38] = 4,
+ [0][1][2][1][RTW89_MEXICO][38] = 78,
+ [0][1][2][1][RTW89_CN][38] = 72,
+ [0][1][2][1][RTW89_QATAR][38] = 4,
+ [0][1][2][1][RTW89_UK][38] = 36,
+ [0][1][2][1][RTW89_FCC][40] = 78,
+ [0][1][2][1][RTW89_ETSI][40] = 4,
+ [0][1][2][1][RTW89_MKK][40] = 127,
+ [0][1][2][1][RTW89_IC][40] = 78,
+ [0][1][2][1][RTW89_KCC][40] = 62,
+ [0][1][2][1][RTW89_ACMA][40] = 74,
+ [0][1][2][1][RTW89_CHILE][40] = 36,
+ [0][1][2][1][RTW89_UKRAINE][40] = 4,
+ [0][1][2][1][RTW89_MEXICO][40] = 78,
+ [0][1][2][1][RTW89_CN][40] = 72,
+ [0][1][2][1][RTW89_QATAR][40] = 4,
+ [0][1][2][1][RTW89_UK][40] = 36,
+ [0][1][2][1][RTW89_FCC][42] = 78,
+ [0][1][2][1][RTW89_ETSI][42] = 4,
+ [0][1][2][1][RTW89_MKK][42] = 127,
+ [0][1][2][1][RTW89_IC][42] = 78,
+ [0][1][2][1][RTW89_KCC][42] = 62,
+ [0][1][2][1][RTW89_ACMA][42] = 76,
+ [0][1][2][1][RTW89_CHILE][42] = 36,
+ [0][1][2][1][RTW89_UKRAINE][42] = 4,
+ [0][1][2][1][RTW89_MEXICO][42] = 78,
+ [0][1][2][1][RTW89_CN][42] = 72,
+ [0][1][2][1][RTW89_QATAR][42] = 4,
+ [0][1][2][1][RTW89_UK][42] = 36,
+ [0][1][2][1][RTW89_FCC][44] = 78,
+ [0][1][2][1][RTW89_ETSI][44] = 4,
+ [0][1][2][1][RTW89_MKK][44] = 127,
+ [0][1][2][1][RTW89_IC][44] = 78,
+ [0][1][2][1][RTW89_KCC][44] = 62,
+ [0][1][2][1][RTW89_ACMA][44] = 76,
+ [0][1][2][1][RTW89_CHILE][44] = 36,
+ [0][1][2][1][RTW89_UKRAINE][44] = 4,
+ [0][1][2][1][RTW89_MEXICO][44] = 78,
+ [0][1][2][1][RTW89_CN][44] = 76,
+ [0][1][2][1][RTW89_QATAR][44] = 4,
+ [0][1][2][1][RTW89_UK][44] = 36,
+ [0][1][2][1][RTW89_FCC][46] = 78,
+ [0][1][2][1][RTW89_ETSI][46] = 4,
+ [0][1][2][1][RTW89_MKK][46] = 127,
+ [0][1][2][1][RTW89_IC][46] = 78,
+ [0][1][2][1][RTW89_KCC][46] = 62,
+ [0][1][2][1][RTW89_ACMA][46] = 76,
+ [0][1][2][1][RTW89_CHILE][46] = 36,
+ [0][1][2][1][RTW89_UKRAINE][46] = 4,
+ [0][1][2][1][RTW89_MEXICO][46] = 78,
+ [0][1][2][1][RTW89_CN][46] = 76,
+ [0][1][2][1][RTW89_QATAR][46] = 4,
+ [0][1][2][1][RTW89_UK][46] = 36,
+ [0][1][2][1][RTW89_FCC][48] = 58,
+ [0][1][2][1][RTW89_ETSI][48] = 127,
+ [0][1][2][1][RTW89_MKK][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_KCC][48] = 127,
+ [0][1][2][1][RTW89_ACMA][48] = 127,
+ [0][1][2][1][RTW89_CHILE][48] = 127,
+ [0][1][2][1][RTW89_UKRAINE][48] = 127,
+ [0][1][2][1][RTW89_MEXICO][48] = 127,
+ [0][1][2][1][RTW89_CN][48] = 127,
+ [0][1][2][1][RTW89_QATAR][48] = 127,
+ [0][1][2][1][RTW89_UK][48] = 127,
+ [0][1][2][1][RTW89_FCC][50] = 58,
+ [0][1][2][1][RTW89_ETSI][50] = 127,
+ [0][1][2][1][RTW89_MKK][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_KCC][50] = 127,
+ [0][1][2][1][RTW89_ACMA][50] = 127,
+ [0][1][2][1][RTW89_CHILE][50] = 127,
+ [0][1][2][1][RTW89_UKRAINE][50] = 127,
+ [0][1][2][1][RTW89_MEXICO][50] = 127,
+ [0][1][2][1][RTW89_CN][50] = 127,
+ [0][1][2][1][RTW89_QATAR][50] = 127,
+ [0][1][2][1][RTW89_UK][50] = 127,
+ [0][1][2][1][RTW89_FCC][52] = 58,
+ [0][1][2][1][RTW89_ETSI][52] = 127,
+ [0][1][2][1][RTW89_MKK][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_KCC][52] = 127,
+ [0][1][2][1][RTW89_ACMA][52] = 127,
+ [0][1][2][1][RTW89_CHILE][52] = 127,
+ [0][1][2][1][RTW89_UKRAINE][52] = 127,
+ [0][1][2][1][RTW89_MEXICO][52] = 127,
+ [0][1][2][1][RTW89_CN][52] = 127,
+ [0][1][2][1][RTW89_QATAR][52] = 127,
+ [0][1][2][1][RTW89_UK][52] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 66,
+ [1][0][2][0][RTW89_ETSI][1] = 64,
+ [1][0][2][0][RTW89_MKK][1] = 62,
+ [1][0][2][0][RTW89_IC][1] = 64,
+ [1][0][2][0][RTW89_KCC][1] = 70,
+ [1][0][2][0][RTW89_ACMA][1] = 64,
+ [1][0][2][0][RTW89_CHILE][1] = 42,
+ [1][0][2][0][RTW89_UKRAINE][1] = 52,
+ [1][0][2][0][RTW89_MEXICO][1] = 62,
+ [1][0][2][0][RTW89_CN][1] = 62,
+ [1][0][2][0][RTW89_QATAR][1] = 64,
+ [1][0][2][0][RTW89_UK][1] = 64,
+ [1][0][2][0][RTW89_FCC][5] = 78,
+ [1][0][2][0][RTW89_ETSI][5] = 64,
+ [1][0][2][0][RTW89_MKK][5] = 62,
+ [1][0][2][0][RTW89_IC][5] = 64,
+ [1][0][2][0][RTW89_KCC][5] = 66,
+ [1][0][2][0][RTW89_ACMA][5] = 64,
+ [1][0][2][0][RTW89_CHILE][5] = 42,
+ [1][0][2][0][RTW89_UKRAINE][5] = 52,
+ [1][0][2][0][RTW89_MEXICO][5] = 62,
+ [1][0][2][0][RTW89_CN][5] = 62,
+ [1][0][2][0][RTW89_QATAR][5] = 64,
+ [1][0][2][0][RTW89_UK][5] = 64,
+ [1][0][2][0][RTW89_FCC][9] = 78,
+ [1][0][2][0][RTW89_ETSI][9] = 64,
+ [1][0][2][0][RTW89_MKK][9] = 62,
+ [1][0][2][0][RTW89_IC][9] = 64,
+ [1][0][2][0][RTW89_KCC][9] = 74,
+ [1][0][2][0][RTW89_ACMA][9] = 64,
+ [1][0][2][0][RTW89_CHILE][9] = 66,
+ [1][0][2][0][RTW89_UKRAINE][9] = 52,
+ [1][0][2][0][RTW89_MEXICO][9] = 78,
+ [1][0][2][0][RTW89_CN][9] = 62,
+ [1][0][2][0][RTW89_QATAR][9] = 64,
+ [1][0][2][0][RTW89_UK][9] = 64,
+ [1][0][2][0][RTW89_FCC][13] = 66,
+ [1][0][2][0][RTW89_ETSI][13] = 64,
+ [1][0][2][0][RTW89_MKK][13] = 62,
+ [1][0][2][0][RTW89_IC][13] = 64,
+ [1][0][2][0][RTW89_KCC][13] = 68,
+ [1][0][2][0][RTW89_ACMA][13] = 64,
+ [1][0][2][0][RTW89_CHILE][13] = 66,
+ [1][0][2][0][RTW89_UKRAINE][13] = 52,
+ [1][0][2][0][RTW89_MEXICO][13] = 66,
+ [1][0][2][0][RTW89_CN][13] = 62,
+ [1][0][2][0][RTW89_QATAR][13] = 64,
+ [1][0][2][0][RTW89_UK][13] = 64,
+ [1][0][2][0][RTW89_FCC][16] = 64,
+ [1][0][2][0][RTW89_ETSI][16] = 64,
+ [1][0][2][0][RTW89_MKK][16] = 74,
+ [1][0][2][0][RTW89_IC][16] = 64,
+ [1][0][2][0][RTW89_KCC][16] = 70,
+ [1][0][2][0][RTW89_ACMA][16] = 64,
+ [1][0][2][0][RTW89_CHILE][16] = 64,
+ [1][0][2][0][RTW89_UKRAINE][16] = 52,
+ [1][0][2][0][RTW89_MEXICO][16] = 64,
+ [1][0][2][0][RTW89_CN][16] = 127,
+ [1][0][2][0][RTW89_QATAR][16] = 64,
+ [1][0][2][0][RTW89_UK][16] = 64,
+ [1][0][2][0][RTW89_FCC][20] = 78,
+ [1][0][2][0][RTW89_ETSI][20] = 64,
+ [1][0][2][0][RTW89_MKK][20] = 74,
+ [1][0][2][0][RTW89_IC][20] = 78,
+ [1][0][2][0][RTW89_KCC][20] = 70,
+ [1][0][2][0][RTW89_ACMA][20] = 64,
+ [1][0][2][0][RTW89_CHILE][20] = 62,
+ [1][0][2][0][RTW89_UKRAINE][20] = 52,
+ [1][0][2][0][RTW89_MEXICO][20] = 78,
+ [1][0][2][0][RTW89_CN][20] = 127,
+ [1][0][2][0][RTW89_QATAR][20] = 64,
+ [1][0][2][0][RTW89_UK][20] = 64,
+ [1][0][2][0][RTW89_FCC][24] = 78,
+ [1][0][2][0][RTW89_ETSI][24] = 64,
+ [1][0][2][0][RTW89_MKK][24] = 74,
+ [1][0][2][0][RTW89_IC][24] = 127,
+ [1][0][2][0][RTW89_KCC][24] = 70,
+ [1][0][2][0][RTW89_ACMA][24] = 127,
+ [1][0][2][0][RTW89_CHILE][24] = 62,
+ [1][0][2][0][RTW89_UKRAINE][24] = 52,
+ [1][0][2][0][RTW89_MEXICO][24] = 78,
+ [1][0][2][0][RTW89_CN][24] = 127,
+ [1][0][2][0][RTW89_QATAR][24] = 64,
+ [1][0][2][0][RTW89_UK][24] = 64,
+ [1][0][2][0][RTW89_FCC][28] = 78,
+ [1][0][2][0][RTW89_ETSI][28] = 64,
+ [1][0][2][0][RTW89_MKK][28] = 74,
+ [1][0][2][0][RTW89_IC][28] = 127,
+ [1][0][2][0][RTW89_KCC][28] = 74,
+ [1][0][2][0][RTW89_ACMA][28] = 127,
+ [1][0][2][0][RTW89_CHILE][28] = 64,
+ [1][0][2][0][RTW89_UKRAINE][28] = 52,
+ [1][0][2][0][RTW89_MEXICO][28] = 78,
+ [1][0][2][0][RTW89_CN][28] = 127,
+ [1][0][2][0][RTW89_QATAR][28] = 64,
+ [1][0][2][0][RTW89_UK][28] = 64,
+ [1][0][2][0][RTW89_FCC][32] = 76,
+ [1][0][2][0][RTW89_ETSI][32] = 64,
+ [1][0][2][0][RTW89_MKK][32] = 74,
+ [1][0][2][0][RTW89_IC][32] = 76,
+ [1][0][2][0][RTW89_KCC][32] = 74,
+ [1][0][2][0][RTW89_ACMA][32] = 64,
+ [1][0][2][0][RTW89_CHILE][32] = 64,
+ [1][0][2][0][RTW89_UKRAINE][32] = 52,
+ [1][0][2][0][RTW89_MEXICO][32] = 76,
+ [1][0][2][0][RTW89_CN][32] = 127,
+ [1][0][2][0][RTW89_QATAR][32] = 64,
+ [1][0][2][0][RTW89_UK][32] = 64,
+ [1][0][2][0][RTW89_FCC][36] = 78,
+ [1][0][2][0][RTW89_ETSI][36] = 127,
+ [1][0][2][0][RTW89_MKK][36] = 74,
+ [1][0][2][0][RTW89_IC][36] = 78,
+ [1][0][2][0][RTW89_KCC][36] = 74,
+ [1][0][2][0][RTW89_ACMA][36] = 74,
+ [1][0][2][0][RTW89_CHILE][36] = 64,
+ [1][0][2][0][RTW89_UKRAINE][36] = 127,
+ [1][0][2][0][RTW89_MEXICO][36] = 78,
+ [1][0][2][0][RTW89_CN][36] = 127,
+ [1][0][2][0][RTW89_QATAR][36] = 127,
+ [1][0][2][0][RTW89_UK][36] = 74,
+ [1][0][2][0][RTW89_FCC][39] = 78,
+ [1][0][2][0][RTW89_ETSI][39] = 28,
+ [1][0][2][0][RTW89_MKK][39] = 127,
+ [1][0][2][0][RTW89_IC][39] = 78,
+ [1][0][2][0][RTW89_KCC][39] = 74,
+ [1][0][2][0][RTW89_ACMA][39] = 74,
+ [1][0][2][0][RTW89_CHILE][39] = 64,
+ [1][0][2][0][RTW89_UKRAINE][39] = 28,
+ [1][0][2][0][RTW89_MEXICO][39] = 78,
+ [1][0][2][0][RTW89_CN][39] = 70,
+ [1][0][2][0][RTW89_QATAR][39] = 28,
+ [1][0][2][0][RTW89_UK][39] = 64,
+ [1][0][2][0][RTW89_FCC][43] = 78,
+ [1][0][2][0][RTW89_ETSI][43] = 28,
+ [1][0][2][0][RTW89_MKK][43] = 127,
+ [1][0][2][0][RTW89_IC][43] = 78,
+ [1][0][2][0][RTW89_KCC][43] = 74,
+ [1][0][2][0][RTW89_ACMA][43] = 74,
+ [1][0][2][0][RTW89_CHILE][43] = 64,
+ [1][0][2][0][RTW89_UKRAINE][43] = 28,
+ [1][0][2][0][RTW89_MEXICO][43] = 78,
+ [1][0][2][0][RTW89_CN][43] = 74,
+ [1][0][2][0][RTW89_QATAR][43] = 28,
+ [1][0][2][0][RTW89_UK][43] = 62,
+ [1][0][2][0][RTW89_FCC][47] = 78,
+ [1][0][2][0][RTW89_ETSI][47] = 127,
+ [1][0][2][0][RTW89_MKK][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_KCC][47] = 127,
+ [1][0][2][0][RTW89_ACMA][47] = 127,
+ [1][0][2][0][RTW89_CHILE][47] = 127,
+ [1][0][2][0][RTW89_UKRAINE][47] = 127,
+ [1][0][2][0][RTW89_MEXICO][47] = 127,
+ [1][0][2][0][RTW89_CN][47] = 127,
+ [1][0][2][0][RTW89_QATAR][47] = 127,
+ [1][0][2][0][RTW89_UK][47] = 127,
+ [1][0][2][0][RTW89_FCC][51] = 70,
+ [1][0][2][0][RTW89_ETSI][51] = 127,
+ [1][0][2][0][RTW89_MKK][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_KCC][51] = 127,
+ [1][0][2][0][RTW89_ACMA][51] = 127,
+ [1][0][2][0][RTW89_CHILE][51] = 127,
+ [1][0][2][0][RTW89_UKRAINE][51] = 127,
+ [1][0][2][0][RTW89_MEXICO][51] = 127,
+ [1][0][2][0][RTW89_CN][51] = 127,
+ [1][0][2][0][RTW89_QATAR][51] = 127,
+ [1][0][2][0][RTW89_UK][51] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 62,
+ [1][1][2][0][RTW89_ETSI][1] = 52,
+ [1][1][2][0][RTW89_MKK][1] = 50,
+ [1][1][2][0][RTW89_IC][1] = 52,
+ [1][1][2][0][RTW89_KCC][1] = 58,
+ [1][1][2][0][RTW89_ACMA][1] = 52,
+ [1][1][2][0][RTW89_CHILE][1] = 30,
+ [1][1][2][0][RTW89_UKRAINE][1] = 40,
+ [1][1][2][0][RTW89_MEXICO][1] = 50,
+ [1][1][2][0][RTW89_CN][1] = 50,
+ [1][1][2][0][RTW89_QATAR][1] = 52,
+ [1][1][2][0][RTW89_UK][1] = 52,
+ [1][1][2][0][RTW89_FCC][5] = 76,
+ [1][1][2][0][RTW89_ETSI][5] = 52,
+ [1][1][2][0][RTW89_MKK][5] = 50,
+ [1][1][2][0][RTW89_IC][5] = 52,
+ [1][1][2][0][RTW89_KCC][5] = 48,
+ [1][1][2][0][RTW89_ACMA][5] = 52,
+ [1][1][2][0][RTW89_CHILE][5] = 30,
+ [1][1][2][0][RTW89_UKRAINE][5] = 40,
+ [1][1][2][0][RTW89_MEXICO][5] = 50,
+ [1][1][2][0][RTW89_CN][5] = 50,
+ [1][1][2][0][RTW89_QATAR][5] = 52,
+ [1][1][2][0][RTW89_UK][5] = 52,
+ [1][1][2][0][RTW89_FCC][9] = 76,
+ [1][1][2][0][RTW89_ETSI][9] = 52,
+ [1][1][2][0][RTW89_MKK][9] = 50,
+ [1][1][2][0][RTW89_IC][9] = 52,
+ [1][1][2][0][RTW89_KCC][9] = 60,
+ [1][1][2][0][RTW89_ACMA][9] = 52,
+ [1][1][2][0][RTW89_CHILE][9] = 50,
+ [1][1][2][0][RTW89_UKRAINE][9] = 40,
+ [1][1][2][0][RTW89_MEXICO][9] = 76,
+ [1][1][2][0][RTW89_CN][9] = 50,
+ [1][1][2][0][RTW89_QATAR][9] = 52,
+ [1][1][2][0][RTW89_UK][9] = 52,
+ [1][1][2][0][RTW89_FCC][13] = 62,
+ [1][1][2][0][RTW89_ETSI][13] = 52,
+ [1][1][2][0][RTW89_MKK][13] = 50,
+ [1][1][2][0][RTW89_IC][13] = 52,
+ [1][1][2][0][RTW89_KCC][13] = 58,
+ [1][1][2][0][RTW89_ACMA][13] = 52,
+ [1][1][2][0][RTW89_CHILE][13] = 48,
+ [1][1][2][0][RTW89_UKRAINE][13] = 40,
+ [1][1][2][0][RTW89_MEXICO][13] = 62,
+ [1][1][2][0][RTW89_CN][13] = 50,
+ [1][1][2][0][RTW89_QATAR][13] = 52,
+ [1][1][2][0][RTW89_UK][13] = 52,
+ [1][1][2][0][RTW89_FCC][16] = 56,
+ [1][1][2][0][RTW89_ETSI][16] = 52,
+ [1][1][2][0][RTW89_MKK][16] = 70,
+ [1][1][2][0][RTW89_IC][16] = 56,
+ [1][1][2][0][RTW89_KCC][16] = 58,
+ [1][1][2][0][RTW89_ACMA][16] = 52,
+ [1][1][2][0][RTW89_CHILE][16] = 48,
+ [1][1][2][0][RTW89_UKRAINE][16] = 40,
+ [1][1][2][0][RTW89_MEXICO][16] = 56,
+ [1][1][2][0][RTW89_CN][16] = 127,
+ [1][1][2][0][RTW89_QATAR][16] = 52,
+ [1][1][2][0][RTW89_UK][16] = 52,
+ [1][1][2][0][RTW89_FCC][20] = 76,
+ [1][1][2][0][RTW89_ETSI][20] = 52,
+ [1][1][2][0][RTW89_MKK][20] = 70,
+ [1][1][2][0][RTW89_IC][20] = 76,
+ [1][1][2][0][RTW89_KCC][20] = 58,
+ [1][1][2][0][RTW89_ACMA][20] = 52,
+ [1][1][2][0][RTW89_CHILE][20] = 50,
+ [1][1][2][0][RTW89_UKRAINE][20] = 40,
+ [1][1][2][0][RTW89_MEXICO][20] = 76,
+ [1][1][2][0][RTW89_CN][20] = 127,
+ [1][1][2][0][RTW89_QATAR][20] = 52,
+ [1][1][2][0][RTW89_UK][20] = 52,
+ [1][1][2][0][RTW89_FCC][24] = 76,
+ [1][1][2][0][RTW89_ETSI][24] = 52,
+ [1][1][2][0][RTW89_MKK][24] = 70,
+ [1][1][2][0][RTW89_IC][24] = 127,
+ [1][1][2][0][RTW89_KCC][24] = 58,
+ [1][1][2][0][RTW89_ACMA][24] = 127,
+ [1][1][2][0][RTW89_CHILE][24] = 50,
+ [1][1][2][0][RTW89_UKRAINE][24] = 40,
+ [1][1][2][0][RTW89_MEXICO][24] = 76,
+ [1][1][2][0][RTW89_CN][24] = 127,
+ [1][1][2][0][RTW89_QATAR][24] = 52,
+ [1][1][2][0][RTW89_UK][24] = 52,
+ [1][1][2][0][RTW89_FCC][28] = 76,
+ [1][1][2][0][RTW89_ETSI][28] = 52,
+ [1][1][2][0][RTW89_MKK][28] = 70,
+ [1][1][2][0][RTW89_IC][28] = 127,
+ [1][1][2][0][RTW89_KCC][28] = 60,
+ [1][1][2][0][RTW89_ACMA][28] = 127,
+ [1][1][2][0][RTW89_CHILE][28] = 48,
+ [1][1][2][0][RTW89_UKRAINE][28] = 40,
+ [1][1][2][0][RTW89_MEXICO][28] = 76,
+ [1][1][2][0][RTW89_CN][28] = 127,
+ [1][1][2][0][RTW89_QATAR][28] = 52,
+ [1][1][2][0][RTW89_UK][28] = 52,
+ [1][1][2][0][RTW89_FCC][32] = 68,
+ [1][1][2][0][RTW89_ETSI][32] = 52,
+ [1][1][2][0][RTW89_MKK][32] = 70,
+ [1][1][2][0][RTW89_IC][32] = 68,
+ [1][1][2][0][RTW89_KCC][32] = 60,
+ [1][1][2][0][RTW89_ACMA][32] = 52,
+ [1][1][2][0][RTW89_CHILE][32] = 48,
+ [1][1][2][0][RTW89_UKRAINE][32] = 40,
+ [1][1][2][0][RTW89_MEXICO][32] = 68,
+ [1][1][2][0][RTW89_CN][32] = 127,
+ [1][1][2][0][RTW89_QATAR][32] = 52,
+ [1][1][2][0][RTW89_UK][32] = 52,
+ [1][1][2][0][RTW89_FCC][36] = 76,
+ [1][1][2][0][RTW89_ETSI][36] = 127,
+ [1][1][2][0][RTW89_MKK][36] = 70,
+ [1][1][2][0][RTW89_IC][36] = 76,
+ [1][1][2][0][RTW89_KCC][36] = 60,
+ [1][1][2][0][RTW89_ACMA][36] = 74,
+ [1][1][2][0][RTW89_CHILE][36] = 50,
+ [1][1][2][0][RTW89_UKRAINE][36] = 127,
+ [1][1][2][0][RTW89_MEXICO][36] = 76,
+ [1][1][2][0][RTW89_CN][36] = 127,
+ [1][1][2][0][RTW89_QATAR][36] = 127,
+ [1][1][2][0][RTW89_UK][36] = 74,
+ [1][1][2][0][RTW89_FCC][39] = 78,
+ [1][1][2][0][RTW89_ETSI][39] = 16,
+ [1][1][2][0][RTW89_MKK][39] = 127,
+ [1][1][2][0][RTW89_IC][39] = 78,
+ [1][1][2][0][RTW89_KCC][39] = 58,
+ [1][1][2][0][RTW89_ACMA][39] = 72,
+ [1][1][2][0][RTW89_CHILE][39] = 52,
+ [1][1][2][0][RTW89_UKRAINE][39] = 16,
+ [1][1][2][0][RTW89_MEXICO][39] = 78,
+ [1][1][2][0][RTW89_CN][39] = 70,
+ [1][1][2][0][RTW89_QATAR][39] = 16,
+ [1][1][2][0][RTW89_UK][39] = 52,
+ [1][1][2][0][RTW89_FCC][43] = 78,
+ [1][1][2][0][RTW89_ETSI][43] = 16,
+ [1][1][2][0][RTW89_MKK][43] = 127,
+ [1][1][2][0][RTW89_IC][43] = 78,
+ [1][1][2][0][RTW89_KCC][43] = 58,
+ [1][1][2][0][RTW89_ACMA][43] = 74,
+ [1][1][2][0][RTW89_CHILE][43] = 52,
+ [1][1][2][0][RTW89_UKRAINE][43] = 16,
+ [1][1][2][0][RTW89_MEXICO][43] = 78,
+ [1][1][2][0][RTW89_CN][43] = 74,
+ [1][1][2][0][RTW89_QATAR][43] = 16,
+ [1][1][2][0][RTW89_UK][43] = 52,
+ [1][1][2][0][RTW89_FCC][47] = 68,
+ [1][1][2][0][RTW89_ETSI][47] = 127,
+ [1][1][2][0][RTW89_MKK][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_KCC][47] = 127,
+ [1][1][2][0][RTW89_ACMA][47] = 127,
+ [1][1][2][0][RTW89_CHILE][47] = 127,
+ [1][1][2][0][RTW89_UKRAINE][47] = 127,
+ [1][1][2][0][RTW89_MEXICO][47] = 127,
+ [1][1][2][0][RTW89_CN][47] = 127,
+ [1][1][2][0][RTW89_QATAR][47] = 127,
+ [1][1][2][0][RTW89_UK][47] = 127,
+ [1][1][2][0][RTW89_FCC][51] = 66,
+ [1][1][2][0][RTW89_ETSI][51] = 127,
+ [1][1][2][0][RTW89_MKK][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_KCC][51] = 127,
+ [1][1][2][0][RTW89_ACMA][51] = 127,
+ [1][1][2][0][RTW89_CHILE][51] = 127,
+ [1][1][2][0][RTW89_UKRAINE][51] = 127,
+ [1][1][2][0][RTW89_MEXICO][51] = 127,
+ [1][1][2][0][RTW89_CN][51] = 127,
+ [1][1][2][0][RTW89_QATAR][51] = 127,
+ [1][1][2][0][RTW89_UK][51] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 62,
+ [1][1][2][1][RTW89_ETSI][1] = 40,
+ [1][1][2][1][RTW89_MKK][1] = 50,
+ [1][1][2][1][RTW89_IC][1] = 40,
+ [1][1][2][1][RTW89_KCC][1] = 58,
+ [1][1][2][1][RTW89_ACMA][1] = 40,
+ [1][1][2][1][RTW89_CHILE][1] = 16,
+ [1][1][2][1][RTW89_UKRAINE][1] = 28,
+ [1][1][2][1][RTW89_MEXICO][1] = 50,
+ [1][1][2][1][RTW89_CN][1] = 38,
+ [1][1][2][1][RTW89_QATAR][1] = 40,
+ [1][1][2][1][RTW89_UK][1] = 40,
+ [1][1][2][1][RTW89_FCC][5] = 68,
+ [1][1][2][1][RTW89_ETSI][5] = 40,
+ [1][1][2][1][RTW89_MKK][5] = 50,
+ [1][1][2][1][RTW89_IC][5] = 40,
+ [1][1][2][1][RTW89_KCC][5] = 48,
+ [1][1][2][1][RTW89_ACMA][5] = 40,
+ [1][1][2][1][RTW89_CHILE][5] = 16,
+ [1][1][2][1][RTW89_UKRAINE][5] = 28,
+ [1][1][2][1][RTW89_MEXICO][5] = 50,
+ [1][1][2][1][RTW89_CN][5] = 38,
+ [1][1][2][1][RTW89_QATAR][5] = 40,
+ [1][1][2][1][RTW89_UK][5] = 40,
+ [1][1][2][1][RTW89_FCC][9] = 68,
+ [1][1][2][1][RTW89_ETSI][9] = 40,
+ [1][1][2][1][RTW89_MKK][9] = 50,
+ [1][1][2][1][RTW89_IC][9] = 40,
+ [1][1][2][1][RTW89_KCC][9] = 60,
+ [1][1][2][1][RTW89_ACMA][9] = 40,
+ [1][1][2][1][RTW89_CHILE][9] = 36,
+ [1][1][2][1][RTW89_UKRAINE][9] = 28,
+ [1][1][2][1][RTW89_MEXICO][9] = 68,
+ [1][1][2][1][RTW89_CN][9] = 38,
+ [1][1][2][1][RTW89_QATAR][9] = 40,
+ [1][1][2][1][RTW89_UK][9] = 40,
+ [1][1][2][1][RTW89_FCC][13] = 62,
+ [1][1][2][1][RTW89_ETSI][13] = 40,
+ [1][1][2][1][RTW89_MKK][13] = 50,
+ [1][1][2][1][RTW89_IC][13] = 40,
+ [1][1][2][1][RTW89_KCC][13] = 58,
+ [1][1][2][1][RTW89_ACMA][13] = 40,
+ [1][1][2][1][RTW89_CHILE][13] = 36,
+ [1][1][2][1][RTW89_UKRAINE][13] = 28,
+ [1][1][2][1][RTW89_MEXICO][13] = 62,
+ [1][1][2][1][RTW89_CN][13] = 38,
+ [1][1][2][1][RTW89_QATAR][13] = 40,
+ [1][1][2][1][RTW89_UK][13] = 40,
+ [1][1][2][1][RTW89_FCC][16] = 56,
+ [1][1][2][1][RTW89_ETSI][16] = 40,
+ [1][1][2][1][RTW89_MKK][16] = 70,
+ [1][1][2][1][RTW89_IC][16] = 56,
+ [1][1][2][1][RTW89_KCC][16] = 58,
+ [1][1][2][1][RTW89_ACMA][16] = 40,
+ [1][1][2][1][RTW89_CHILE][16] = 36,
+ [1][1][2][1][RTW89_UKRAINE][16] = 28,
+ [1][1][2][1][RTW89_MEXICO][16] = 56,
+ [1][1][2][1][RTW89_CN][16] = 127,
+ [1][1][2][1][RTW89_QATAR][16] = 40,
+ [1][1][2][1][RTW89_UK][16] = 40,
+ [1][1][2][1][RTW89_FCC][20] = 68,
+ [1][1][2][1][RTW89_ETSI][20] = 40,
+ [1][1][2][1][RTW89_MKK][20] = 70,
+ [1][1][2][1][RTW89_IC][20] = 68,
+ [1][1][2][1][RTW89_KCC][20] = 58,
+ [1][1][2][1][RTW89_ACMA][20] = 40,
+ [1][1][2][1][RTW89_CHILE][20] = 36,
+ [1][1][2][1][RTW89_UKRAINE][20] = 28,
+ [1][1][2][1][RTW89_MEXICO][20] = 68,
+ [1][1][2][1][RTW89_CN][20] = 127,
+ [1][1][2][1][RTW89_QATAR][20] = 40,
+ [1][1][2][1][RTW89_UK][20] = 40,
+ [1][1][2][1][RTW89_FCC][24] = 68,
+ [1][1][2][1][RTW89_ETSI][24] = 40,
+ [1][1][2][1][RTW89_MKK][24] = 70,
+ [1][1][2][1][RTW89_IC][24] = 127,
+ [1][1][2][1][RTW89_KCC][24] = 58,
+ [1][1][2][1][RTW89_ACMA][24] = 127,
+ [1][1][2][1][RTW89_CHILE][24] = 36,
+ [1][1][2][1][RTW89_UKRAINE][24] = 28,
+ [1][1][2][1][RTW89_MEXICO][24] = 68,
+ [1][1][2][1][RTW89_CN][24] = 127,
+ [1][1][2][1][RTW89_QATAR][24] = 40,
+ [1][1][2][1][RTW89_UK][24] = 40,
+ [1][1][2][1][RTW89_FCC][28] = 68,
+ [1][1][2][1][RTW89_ETSI][28] = 40,
+ [1][1][2][1][RTW89_MKK][28] = 70,
+ [1][1][2][1][RTW89_IC][28] = 127,
+ [1][1][2][1][RTW89_KCC][28] = 60,
+ [1][1][2][1][RTW89_ACMA][28] = 127,
+ [1][1][2][1][RTW89_CHILE][28] = 36,
+ [1][1][2][1][RTW89_UKRAINE][28] = 28,
+ [1][1][2][1][RTW89_MEXICO][28] = 68,
+ [1][1][2][1][RTW89_CN][28] = 127,
+ [1][1][2][1][RTW89_QATAR][28] = 40,
+ [1][1][2][1][RTW89_UK][28] = 40,
+ [1][1][2][1][RTW89_FCC][32] = 68,
+ [1][1][2][1][RTW89_ETSI][32] = 40,
+ [1][1][2][1][RTW89_MKK][32] = 70,
+ [1][1][2][1][RTW89_IC][32] = 68,
+ [1][1][2][1][RTW89_KCC][32] = 60,
+ [1][1][2][1][RTW89_ACMA][32] = 40,
+ [1][1][2][1][RTW89_CHILE][32] = 36,
+ [1][1][2][1][RTW89_UKRAINE][32] = 28,
+ [1][1][2][1][RTW89_MEXICO][32] = 68,
+ [1][1][2][1][RTW89_CN][32] = 127,
+ [1][1][2][1][RTW89_QATAR][32] = 40,
+ [1][1][2][1][RTW89_UK][32] = 40,
+ [1][1][2][1][RTW89_FCC][36] = 68,
+ [1][1][2][1][RTW89_ETSI][36] = 127,
+ [1][1][2][1][RTW89_MKK][36] = 70,
+ [1][1][2][1][RTW89_IC][36] = 68,
+ [1][1][2][1][RTW89_KCC][36] = 60,
+ [1][1][2][1][RTW89_ACMA][36] = 70,
+ [1][1][2][1][RTW89_CHILE][36] = 36,
+ [1][1][2][1][RTW89_UKRAINE][36] = 127,
+ [1][1][2][1][RTW89_MEXICO][36] = 68,
+ [1][1][2][1][RTW89_CN][36] = 127,
+ [1][1][2][1][RTW89_QATAR][36] = 127,
+ [1][1][2][1][RTW89_UK][36] = 62,
+ [1][1][2][1][RTW89_FCC][39] = 78,
+ [1][1][2][1][RTW89_ETSI][39] = 4,
+ [1][1][2][1][RTW89_MKK][39] = 127,
+ [1][1][2][1][RTW89_IC][39] = 78,
+ [1][1][2][1][RTW89_KCC][39] = 58,
+ [1][1][2][1][RTW89_ACMA][39] = 72,
+ [1][1][2][1][RTW89_CHILE][39] = 36,
+ [1][1][2][1][RTW89_UKRAINE][39] = 4,
+ [1][1][2][1][RTW89_MEXICO][39] = 78,
+ [1][1][2][1][RTW89_CN][39] = 70,
+ [1][1][2][1][RTW89_QATAR][39] = 4,
+ [1][1][2][1][RTW89_UK][39] = 40,
+ [1][1][2][1][RTW89_FCC][43] = 78,
+ [1][1][2][1][RTW89_ETSI][43] = 4,
+ [1][1][2][1][RTW89_MKK][43] = 127,
+ [1][1][2][1][RTW89_IC][43] = 78,
+ [1][1][2][1][RTW89_KCC][43] = 58,
+ [1][1][2][1][RTW89_ACMA][43] = 74,
+ [1][1][2][1][RTW89_CHILE][43] = 36,
+ [1][1][2][1][RTW89_UKRAINE][43] = 4,
+ [1][1][2][1][RTW89_MEXICO][43] = 78,
+ [1][1][2][1][RTW89_CN][43] = 74,
+ [1][1][2][1][RTW89_QATAR][43] = 4,
+ [1][1][2][1][RTW89_UK][43] = 40,
+ [1][1][2][1][RTW89_FCC][47] = 68,
+ [1][1][2][1][RTW89_ETSI][47] = 127,
+ [1][1][2][1][RTW89_MKK][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_KCC][47] = 127,
+ [1][1][2][1][RTW89_ACMA][47] = 127,
+ [1][1][2][1][RTW89_CHILE][47] = 127,
+ [1][1][2][1][RTW89_UKRAINE][47] = 127,
+ [1][1][2][1][RTW89_MEXICO][47] = 127,
+ [1][1][2][1][RTW89_CN][47] = 127,
+ [1][1][2][1][RTW89_QATAR][47] = 127,
+ [1][1][2][1][RTW89_UK][47] = 127,
+ [1][1][2][1][RTW89_FCC][51] = 66,
+ [1][1][2][1][RTW89_ETSI][51] = 127,
+ [1][1][2][1][RTW89_MKK][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_KCC][51] = 127,
+ [1][1][2][1][RTW89_ACMA][51] = 127,
+ [1][1][2][1][RTW89_CHILE][51] = 127,
+ [1][1][2][1][RTW89_UKRAINE][51] = 127,
+ [1][1][2][1][RTW89_MEXICO][51] = 127,
+ [1][1][2][1][RTW89_CN][51] = 127,
+ [1][1][2][1][RTW89_QATAR][51] = 127,
+ [1][1][2][1][RTW89_UK][51] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 64,
+ [2][0][2][0][RTW89_ETSI][3] = 64,
+ [2][0][2][0][RTW89_MKK][3] = 64,
+ [2][0][2][0][RTW89_IC][3] = 62,
+ [2][0][2][0][RTW89_KCC][3] = 68,
+ [2][0][2][0][RTW89_ACMA][3] = 64,
+ [2][0][2][0][RTW89_CHILE][3] = 42,
+ [2][0][2][0][RTW89_UKRAINE][3] = 52,
+ [2][0][2][0][RTW89_MEXICO][3] = 62,
+ [2][0][2][0][RTW89_CN][3] = 62,
+ [2][0][2][0][RTW89_QATAR][3] = 64,
+ [2][0][2][0][RTW89_UK][3] = 64,
+ [2][0][2][0][RTW89_FCC][11] = 66,
+ [2][0][2][0][RTW89_ETSI][11] = 64,
+ [2][0][2][0][RTW89_MKK][11] = 64,
+ [2][0][2][0][RTW89_IC][11] = 64,
+ [2][0][2][0][RTW89_KCC][11] = 70,
+ [2][0][2][0][RTW89_ACMA][11] = 64,
+ [2][0][2][0][RTW89_CHILE][11] = 66,
+ [2][0][2][0][RTW89_UKRAINE][11] = 52,
+ [2][0][2][0][RTW89_MEXICO][11] = 66,
+ [2][0][2][0][RTW89_CN][11] = 62,
+ [2][0][2][0][RTW89_QATAR][11] = 64,
+ [2][0][2][0][RTW89_UK][11] = 64,
+ [2][0][2][0][RTW89_FCC][18] = 62,
+ [2][0][2][0][RTW89_ETSI][18] = 64,
+ [2][0][2][0][RTW89_MKK][18] = 70,
+ [2][0][2][0][RTW89_IC][18] = 62,
+ [2][0][2][0][RTW89_KCC][18] = 64,
+ [2][0][2][0][RTW89_ACMA][18] = 64,
+ [2][0][2][0][RTW89_CHILE][18] = 64,
+ [2][0][2][0][RTW89_UKRAINE][18] = 52,
+ [2][0][2][0][RTW89_MEXICO][18] = 62,
+ [2][0][2][0][RTW89_CN][18] = 127,
+ [2][0][2][0][RTW89_QATAR][18] = 64,
+ [2][0][2][0][RTW89_UK][18] = 64,
+ [2][0][2][0][RTW89_FCC][26] = 74,
+ [2][0][2][0][RTW89_ETSI][26] = 64,
+ [2][0][2][0][RTW89_MKK][26] = 70,
+ [2][0][2][0][RTW89_IC][26] = 127,
+ [2][0][2][0][RTW89_KCC][26] = 70,
+ [2][0][2][0][RTW89_ACMA][26] = 127,
+ [2][0][2][0][RTW89_CHILE][26] = 64,
+ [2][0][2][0][RTW89_UKRAINE][26] = 52,
+ [2][0][2][0][RTW89_MEXICO][26] = 74,
+ [2][0][2][0][RTW89_CN][26] = 127,
+ [2][0][2][0][RTW89_QATAR][26] = 64,
+ [2][0][2][0][RTW89_UK][26] = 64,
+ [2][0][2][0][RTW89_FCC][34] = 74,
+ [2][0][2][0][RTW89_ETSI][34] = 127,
+ [2][0][2][0][RTW89_MKK][34] = 70,
+ [2][0][2][0][RTW89_IC][34] = 74,
+ [2][0][2][0][RTW89_KCC][34] = 70,
+ [2][0][2][0][RTW89_ACMA][34] = 70,
+ [2][0][2][0][RTW89_CHILE][34] = 64,
+ [2][0][2][0][RTW89_UKRAINE][34] = 127,
+ [2][0][2][0][RTW89_MEXICO][34] = 74,
+ [2][0][2][0][RTW89_CN][34] = 127,
+ [2][0][2][0][RTW89_QATAR][34] = 127,
+ [2][0][2][0][RTW89_UK][34] = 70,
+ [2][0][2][0][RTW89_FCC][41] = 74,
+ [2][0][2][0][RTW89_ETSI][41] = 28,
+ [2][0][2][0][RTW89_MKK][41] = 127,
+ [2][0][2][0][RTW89_IC][41] = 74,
+ [2][0][2][0][RTW89_KCC][41] = 66,
+ [2][0][2][0][RTW89_ACMA][41] = 70,
+ [2][0][2][0][RTW89_CHILE][41] = 64,
+ [2][0][2][0][RTW89_UKRAINE][41] = 28,
+ [2][0][2][0][RTW89_MEXICO][41] = 74,
+ [2][0][2][0][RTW89_CN][41] = 70,
+ [2][0][2][0][RTW89_QATAR][41] = 28,
+ [2][0][2][0][RTW89_UK][41] = 64,
+ [2][0][2][0][RTW89_FCC][49] = 64,
+ [2][0][2][0][RTW89_ETSI][49] = 127,
+ [2][0][2][0][RTW89_MKK][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_KCC][49] = 127,
+ [2][0][2][0][RTW89_ACMA][49] = 127,
+ [2][0][2][0][RTW89_CHILE][49] = 127,
+ [2][0][2][0][RTW89_UKRAINE][49] = 127,
+ [2][0][2][0][RTW89_MEXICO][49] = 127,
+ [2][0][2][0][RTW89_CN][49] = 127,
+ [2][0][2][0][RTW89_QATAR][49] = 127,
+ [2][0][2][0][RTW89_UK][49] = 127,
+ [2][1][2][0][RTW89_FCC][3] = 56,
+ [2][1][2][0][RTW89_ETSI][3] = 52,
+ [2][1][2][0][RTW89_MKK][3] = 52,
+ [2][1][2][0][RTW89_IC][3] = 52,
+ [2][1][2][0][RTW89_KCC][3] = 54,
+ [2][1][2][0][RTW89_ACMA][3] = 52,
+ [2][1][2][0][RTW89_CHILE][3] = 28,
+ [2][1][2][0][RTW89_UKRAINE][3] = 40,
+ [2][1][2][0][RTW89_MEXICO][3] = 50,
+ [2][1][2][0][RTW89_CN][3] = 50,
+ [2][1][2][0][RTW89_QATAR][3] = 52,
+ [2][1][2][0][RTW89_UK][3] = 52,
+ [2][1][2][0][RTW89_FCC][11] = 62,
+ [2][1][2][0][RTW89_ETSI][11] = 52,
+ [2][1][2][0][RTW89_MKK][11] = 52,
+ [2][1][2][0][RTW89_IC][11] = 52,
+ [2][1][2][0][RTW89_KCC][11] = 56,
+ [2][1][2][0][RTW89_ACMA][11] = 52,
+ [2][1][2][0][RTW89_CHILE][11] = 52,
+ [2][1][2][0][RTW89_UKRAINE][11] = 40,
+ [2][1][2][0][RTW89_MEXICO][11] = 62,
+ [2][1][2][0][RTW89_CN][11] = 50,
+ [2][1][2][0][RTW89_QATAR][11] = 52,
+ [2][1][2][0][RTW89_UK][11] = 52,
+ [2][1][2][0][RTW89_FCC][18] = 56,
+ [2][1][2][0][RTW89_ETSI][18] = 52,
+ [2][1][2][0][RTW89_MKK][18] = 70,
+ [2][1][2][0][RTW89_IC][18] = 56,
+ [2][1][2][0][RTW89_KCC][18] = 58,
+ [2][1][2][0][RTW89_ACMA][18] = 52,
+ [2][1][2][0][RTW89_CHILE][18] = 48,
+ [2][1][2][0][RTW89_UKRAINE][18] = 40,
+ [2][1][2][0][RTW89_MEXICO][18] = 56,
+ [2][1][2][0][RTW89_CN][18] = 127,
+ [2][1][2][0][RTW89_QATAR][18] = 52,
+ [2][1][2][0][RTW89_UK][18] = 52,
+ [2][1][2][0][RTW89_FCC][26] = 70,
+ [2][1][2][0][RTW89_ETSI][26] = 52,
+ [2][1][2][0][RTW89_MKK][26] = 70,
+ [2][1][2][0][RTW89_IC][26] = 127,
+ [2][1][2][0][RTW89_KCC][26] = 56,
+ [2][1][2][0][RTW89_ACMA][26] = 127,
+ [2][1][2][0][RTW89_CHILE][26] = 50,
+ [2][1][2][0][RTW89_UKRAINE][26] = 40,
+ [2][1][2][0][RTW89_MEXICO][26] = 70,
+ [2][1][2][0][RTW89_CN][26] = 127,
+ [2][1][2][0][RTW89_QATAR][26] = 52,
+ [2][1][2][0][RTW89_UK][26] = 52,
+ [2][1][2][0][RTW89_FCC][34] = 74,
+ [2][1][2][0][RTW89_ETSI][34] = 127,
+ [2][1][2][0][RTW89_MKK][34] = 70,
+ [2][1][2][0][RTW89_IC][34] = 74,
+ [2][1][2][0][RTW89_KCC][34] = 56,
+ [2][1][2][0][RTW89_ACMA][34] = 70,
+ [2][1][2][0][RTW89_CHILE][34] = 50,
+ [2][1][2][0][RTW89_UKRAINE][34] = 127,
+ [2][1][2][0][RTW89_MEXICO][34] = 74,
+ [2][1][2][0][RTW89_CN][34] = 127,
+ [2][1][2][0][RTW89_QATAR][34] = 127,
+ [2][1][2][0][RTW89_UK][34] = 68,
+ [2][1][2][0][RTW89_FCC][41] = 74,
+ [2][1][2][0][RTW89_ETSI][41] = 16,
+ [2][1][2][0][RTW89_MKK][41] = 127,
+ [2][1][2][0][RTW89_IC][41] = 74,
+ [2][1][2][0][RTW89_KCC][41] = 56,
+ [2][1][2][0][RTW89_ACMA][41] = 70,
+ [2][1][2][0][RTW89_CHILE][41] = 50,
+ [2][1][2][0][RTW89_UKRAINE][41] = 16,
+ [2][1][2][0][RTW89_MEXICO][41] = 74,
+ [2][1][2][0][RTW89_CN][41] = 70,
+ [2][1][2][0][RTW89_QATAR][41] = 16,
+ [2][1][2][0][RTW89_UK][41] = 52,
+ [2][1][2][0][RTW89_FCC][49] = 58,
+ [2][1][2][0][RTW89_ETSI][49] = 127,
+ [2][1][2][0][RTW89_MKK][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_KCC][49] = 127,
+ [2][1][2][0][RTW89_ACMA][49] = 127,
+ [2][1][2][0][RTW89_CHILE][49] = 127,
+ [2][1][2][0][RTW89_UKRAINE][49] = 127,
+ [2][1][2][0][RTW89_MEXICO][49] = 127,
+ [2][1][2][0][RTW89_CN][49] = 127,
+ [2][1][2][0][RTW89_QATAR][49] = 127,
+ [2][1][2][0][RTW89_UK][49] = 127,
+ [2][1][2][1][RTW89_FCC][3] = 56,
+ [2][1][2][1][RTW89_ETSI][3] = 40,
+ [2][1][2][1][RTW89_MKK][3] = 52,
+ [2][1][2][1][RTW89_IC][3] = 40,
+ [2][1][2][1][RTW89_KCC][3] = 54,
+ [2][1][2][1][RTW89_ACMA][3] = 40,
+ [2][1][2][1][RTW89_CHILE][3] = 16,
+ [2][1][2][1][RTW89_UKRAINE][3] = 28,
+ [2][1][2][1][RTW89_MEXICO][3] = 50,
+ [2][1][2][1][RTW89_CN][3] = 38,
+ [2][1][2][1][RTW89_QATAR][3] = 40,
+ [2][1][2][1][RTW89_UK][3] = 40,
+ [2][1][2][1][RTW89_FCC][11] = 62,
+ [2][1][2][1][RTW89_ETSI][11] = 40,
+ [2][1][2][1][RTW89_MKK][11] = 52,
+ [2][1][2][1][RTW89_IC][11] = 40,
+ [2][1][2][1][RTW89_KCC][11] = 56,
+ [2][1][2][1][RTW89_ACMA][11] = 40,
+ [2][1][2][1][RTW89_CHILE][11] = 34,
+ [2][1][2][1][RTW89_UKRAINE][11] = 28,
+ [2][1][2][1][RTW89_MEXICO][11] = 62,
+ [2][1][2][1][RTW89_CN][11] = 38,
+ [2][1][2][1][RTW89_QATAR][11] = 40,
+ [2][1][2][1][RTW89_UK][11] = 40,
+ [2][1][2][1][RTW89_FCC][18] = 56,
+ [2][1][2][1][RTW89_ETSI][18] = 40,
+ [2][1][2][1][RTW89_MKK][18] = 70,
+ [2][1][2][1][RTW89_IC][18] = 56,
+ [2][1][2][1][RTW89_KCC][18] = 58,
+ [2][1][2][1][RTW89_ACMA][18] = 40,
+ [2][1][2][1][RTW89_CHILE][18] = 34,
+ [2][1][2][1][RTW89_UKRAINE][18] = 28,
+ [2][1][2][1][RTW89_MEXICO][18] = 56,
+ [2][1][2][1][RTW89_CN][18] = 127,
+ [2][1][2][1][RTW89_QATAR][18] = 40,
+ [2][1][2][1][RTW89_UK][18] = 40,
+ [2][1][2][1][RTW89_FCC][26] = 68,
+ [2][1][2][1][RTW89_ETSI][26] = 40,
+ [2][1][2][1][RTW89_MKK][26] = 70,
+ [2][1][2][1][RTW89_IC][26] = 127,
+ [2][1][2][1][RTW89_KCC][26] = 56,
+ [2][1][2][1][RTW89_ACMA][26] = 127,
+ [2][1][2][1][RTW89_CHILE][26] = 34,
+ [2][1][2][1][RTW89_UKRAINE][26] = 28,
+ [2][1][2][1][RTW89_MEXICO][26] = 68,
+ [2][1][2][1][RTW89_CN][26] = 127,
+ [2][1][2][1][RTW89_QATAR][26] = 40,
+ [2][1][2][1][RTW89_UK][26] = 40,
+ [2][1][2][1][RTW89_FCC][34] = 68,
+ [2][1][2][1][RTW89_ETSI][34] = 127,
+ [2][1][2][1][RTW89_MKK][34] = 70,
+ [2][1][2][1][RTW89_IC][34] = 68,
+ [2][1][2][1][RTW89_KCC][34] = 56,
+ [2][1][2][1][RTW89_ACMA][34] = 70,
+ [2][1][2][1][RTW89_CHILE][34] = 34,
+ [2][1][2][1][RTW89_UKRAINE][34] = 127,
+ [2][1][2][1][RTW89_MEXICO][34] = 68,
+ [2][1][2][1][RTW89_CN][34] = 127,
+ [2][1][2][1][RTW89_QATAR][34] = 127,
+ [2][1][2][1][RTW89_UK][34] = 56,
+ [2][1][2][1][RTW89_FCC][41] = 74,
+ [2][1][2][1][RTW89_ETSI][41] = 4,
+ [2][1][2][1][RTW89_MKK][41] = 127,
+ [2][1][2][1][RTW89_IC][41] = 74,
+ [2][1][2][1][RTW89_KCC][41] = 56,
+ [2][1][2][1][RTW89_ACMA][41] = 70,
+ [2][1][2][1][RTW89_CHILE][41] = 36,
+ [2][1][2][1][RTW89_UKRAINE][41] = 4,
+ [2][1][2][1][RTW89_MEXICO][41] = 74,
+ [2][1][2][1][RTW89_CN][41] = 70,
+ [2][1][2][1][RTW89_QATAR][41] = 4,
+ [2][1][2][1][RTW89_UK][41] = 38,
+ [2][1][2][1][RTW89_FCC][49] = 58,
+ [2][1][2][1][RTW89_ETSI][49] = 127,
+ [2][1][2][1][RTW89_MKK][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_KCC][49] = 127,
+ [2][1][2][1][RTW89_ACMA][49] = 127,
+ [2][1][2][1][RTW89_CHILE][49] = 127,
+ [2][1][2][1][RTW89_UKRAINE][49] = 127,
+ [2][1][2][1][RTW89_MEXICO][49] = 127,
+ [2][1][2][1][RTW89_CN][49] = 127,
+ [2][1][2][1][RTW89_QATAR][49] = 127,
+ [2][1][2][1][RTW89_UK][49] = 127,
+};
+
+const s8 rtw89_8852b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = 32,
+ [0][0][RTW89_WW][1] = 32,
+ [0][0][RTW89_WW][2] = 32,
+ [0][0][RTW89_WW][3] = 32,
+ [0][0][RTW89_WW][4] = 32,
+ [0][0][RTW89_WW][5] = 32,
+ [0][0][RTW89_WW][6] = 32,
+ [0][0][RTW89_WW][7] = 32,
+ [0][0][RTW89_WW][8] = 32,
+ [0][0][RTW89_WW][9] = 32,
+ [0][0][RTW89_WW][10] = 32,
+ [0][0][RTW89_WW][11] = 32,
+ [0][0][RTW89_WW][12] = 32,
+ [0][0][RTW89_WW][13] = 0,
+ [0][1][RTW89_WW][0] = 20,
+ [0][1][RTW89_WW][1] = 22,
+ [0][1][RTW89_WW][2] = 22,
+ [0][1][RTW89_WW][3] = 22,
+ [0][1][RTW89_WW][4] = 22,
+ [0][1][RTW89_WW][5] = 22,
+ [0][1][RTW89_WW][6] = 22,
+ [0][1][RTW89_WW][7] = 22,
+ [0][1][RTW89_WW][8] = 22,
+ [0][1][RTW89_WW][9] = 22,
+ [0][1][RTW89_WW][10] = 22,
+ [0][1][RTW89_WW][11] = 22,
+ [0][1][RTW89_WW][12] = 20,
+ [0][1][RTW89_WW][13] = 0,
+ [1][0][RTW89_WW][0] = 42,
+ [1][0][RTW89_WW][1] = 44,
+ [1][0][RTW89_WW][2] = 44,
+ [1][0][RTW89_WW][3] = 44,
+ [1][0][RTW89_WW][4] = 44,
+ [1][0][RTW89_WW][5] = 44,
+ [1][0][RTW89_WW][6] = 44,
+ [1][0][RTW89_WW][7] = 44,
+ [1][0][RTW89_WW][8] = 44,
+ [1][0][RTW89_WW][9] = 44,
+ [1][0][RTW89_WW][10] = 44,
+ [1][0][RTW89_WW][11] = 44,
+ [1][0][RTW89_WW][12] = 38,
+ [1][0][RTW89_WW][13] = 0,
+ [1][1][RTW89_WW][0] = 32,
+ [1][1][RTW89_WW][1] = 32,
+ [1][1][RTW89_WW][2] = 32,
+ [1][1][RTW89_WW][3] = 32,
+ [1][1][RTW89_WW][4] = 32,
+ [1][1][RTW89_WW][5] = 32,
+ [1][1][RTW89_WW][6] = 32,
+ [1][1][RTW89_WW][7] = 32,
+ [1][1][RTW89_WW][8] = 32,
+ [1][1][RTW89_WW][9] = 32,
+ [1][1][RTW89_WW][10] = 32,
+ [1][1][RTW89_WW][11] = 32,
+ [1][1][RTW89_WW][12] = 32,
+ [1][1][RTW89_WW][13] = 0,
+ [2][0][RTW89_WW][0] = 56,
+ [2][0][RTW89_WW][1] = 56,
+ [2][0][RTW89_WW][2] = 56,
+ [2][0][RTW89_WW][3] = 56,
+ [2][0][RTW89_WW][4] = 56,
+ [2][0][RTW89_WW][5] = 56,
+ [2][0][RTW89_WW][6] = 56,
+ [2][0][RTW89_WW][7] = 56,
+ [2][0][RTW89_WW][8] = 56,
+ [2][0][RTW89_WW][9] = 56,
+ [2][0][RTW89_WW][10] = 56,
+ [2][0][RTW89_WW][11] = 50,
+ [2][0][RTW89_WW][12] = 46,
+ [2][0][RTW89_WW][13] = 0,
+ [2][1][RTW89_WW][0] = 44,
+ [2][1][RTW89_WW][1] = 44,
+ [2][1][RTW89_WW][2] = 44,
+ [2][1][RTW89_WW][3] = 44,
+ [2][1][RTW89_WW][4] = 44,
+ [2][1][RTW89_WW][5] = 44,
+ [2][1][RTW89_WW][6] = 44,
+ [2][1][RTW89_WW][7] = 44,
+ [2][1][RTW89_WW][8] = 44,
+ [2][1][RTW89_WW][9] = 44,
+ [2][1][RTW89_WW][10] = 44,
+ [2][1][RTW89_WW][11] = 38,
+ [2][1][RTW89_WW][12] = 34,
+ [2][1][RTW89_WW][13] = 0,
+ [0][0][RTW89_FCC][0] = 68,
+ [0][0][RTW89_ETSI][0] = 32,
+ [0][0][RTW89_MKK][0] = 42,
+ [0][0][RTW89_IC][0] = 68,
+ [0][0][RTW89_KCC][0] = 44,
+ [0][0][RTW89_ACMA][0] = 32,
+ [0][0][RTW89_CHILE][0] = 66,
+ [0][0][RTW89_UKRAINE][0] = 32,
+ [0][0][RTW89_MEXICO][0] = 68,
+ [0][0][RTW89_CN][0] = 32,
+ [0][0][RTW89_QATAR][0] = 32,
+ [0][0][RTW89_UK][0] = 32,
+ [0][0][RTW89_FCC][1] = 68,
+ [0][0][RTW89_ETSI][1] = 32,
+ [0][0][RTW89_MKK][1] = 42,
+ [0][0][RTW89_IC][1] = 68,
+ [0][0][RTW89_KCC][1] = 44,
+ [0][0][RTW89_ACMA][1] = 32,
+ [0][0][RTW89_CHILE][1] = 64,
+ [0][0][RTW89_UKRAINE][1] = 32,
+ [0][0][RTW89_MEXICO][1] = 68,
+ [0][0][RTW89_CN][1] = 32,
+ [0][0][RTW89_QATAR][1] = 32,
+ [0][0][RTW89_UK][1] = 32,
+ [0][0][RTW89_FCC][2] = 72,
+ [0][0][RTW89_ETSI][2] = 32,
+ [0][0][RTW89_MKK][2] = 42,
+ [0][0][RTW89_IC][2] = 72,
+ [0][0][RTW89_KCC][2] = 44,
+ [0][0][RTW89_ACMA][2] = 32,
+ [0][0][RTW89_CHILE][2] = 64,
+ [0][0][RTW89_UKRAINE][2] = 32,
+ [0][0][RTW89_MEXICO][2] = 72,
+ [0][0][RTW89_CN][2] = 32,
+ [0][0][RTW89_QATAR][2] = 32,
+ [0][0][RTW89_UK][2] = 32,
+ [0][0][RTW89_FCC][3] = 76,
+ [0][0][RTW89_ETSI][3] = 32,
+ [0][0][RTW89_MKK][3] = 42,
+ [0][0][RTW89_IC][3] = 76,
+ [0][0][RTW89_KCC][3] = 44,
+ [0][0][RTW89_ACMA][3] = 32,
+ [0][0][RTW89_CHILE][3] = 64,
+ [0][0][RTW89_UKRAINE][3] = 32,
+ [0][0][RTW89_MEXICO][3] = 76,
+ [0][0][RTW89_CN][3] = 32,
+ [0][0][RTW89_QATAR][3] = 32,
+ [0][0][RTW89_UK][3] = 32,
+ [0][0][RTW89_FCC][4] = 76,
+ [0][0][RTW89_ETSI][4] = 32,
+ [0][0][RTW89_MKK][4] = 42,
+ [0][0][RTW89_IC][4] = 76,
+ [0][0][RTW89_KCC][4] = 44,
+ [0][0][RTW89_ACMA][4] = 32,
+ [0][0][RTW89_CHILE][4] = 64,
+ [0][0][RTW89_UKRAINE][4] = 32,
+ [0][0][RTW89_MEXICO][4] = 76,
+ [0][0][RTW89_CN][4] = 32,
+ [0][0][RTW89_QATAR][4] = 32,
+ [0][0][RTW89_UK][4] = 32,
+ [0][0][RTW89_FCC][5] = 84,
+ [0][0][RTW89_ETSI][5] = 32,
+ [0][0][RTW89_MKK][5] = 42,
+ [0][0][RTW89_IC][5] = 84,
+ [0][0][RTW89_KCC][5] = 44,
+ [0][0][RTW89_ACMA][5] = 32,
+ [0][0][RTW89_CHILE][5] = 64,
+ [0][0][RTW89_UKRAINE][5] = 32,
+ [0][0][RTW89_MEXICO][5] = 84,
+ [0][0][RTW89_CN][5] = 32,
+ [0][0][RTW89_QATAR][5] = 32,
+ [0][0][RTW89_UK][5] = 32,
+ [0][0][RTW89_FCC][6] = 74,
+ [0][0][RTW89_ETSI][6] = 32,
+ [0][0][RTW89_MKK][6] = 42,
+ [0][0][RTW89_IC][6] = 74,
+ [0][0][RTW89_KCC][6] = 44,
+ [0][0][RTW89_ACMA][6] = 32,
+ [0][0][RTW89_CHILE][6] = 64,
+ [0][0][RTW89_UKRAINE][6] = 32,
+ [0][0][RTW89_MEXICO][6] = 74,
+ [0][0][RTW89_CN][6] = 32,
+ [0][0][RTW89_QATAR][6] = 32,
+ [0][0][RTW89_UK][6] = 32,
+ [0][0][RTW89_FCC][7] = 74,
+ [0][0][RTW89_ETSI][7] = 32,
+ [0][0][RTW89_MKK][7] = 42,
+ [0][0][RTW89_IC][7] = 74,
+ [0][0][RTW89_KCC][7] = 44,
+ [0][0][RTW89_ACMA][7] = 32,
+ [0][0][RTW89_CHILE][7] = 64,
+ [0][0][RTW89_UKRAINE][7] = 32,
+ [0][0][RTW89_MEXICO][7] = 74,
+ [0][0][RTW89_CN][7] = 32,
+ [0][0][RTW89_QATAR][7] = 32,
+ [0][0][RTW89_UK][7] = 32,
+ [0][0][RTW89_FCC][8] = 70,
+ [0][0][RTW89_ETSI][8] = 32,
+ [0][0][RTW89_MKK][8] = 42,
+ [0][0][RTW89_IC][8] = 70,
+ [0][0][RTW89_KCC][8] = 44,
+ [0][0][RTW89_ACMA][8] = 32,
+ [0][0][RTW89_CHILE][8] = 64,
+ [0][0][RTW89_UKRAINE][8] = 32,
+ [0][0][RTW89_MEXICO][8] = 70,
+ [0][0][RTW89_CN][8] = 32,
+ [0][0][RTW89_QATAR][8] = 32,
+ [0][0][RTW89_UK][8] = 32,
+ [0][0][RTW89_FCC][9] = 66,
+ [0][0][RTW89_ETSI][9] = 32,
+ [0][0][RTW89_MKK][9] = 42,
+ [0][0][RTW89_IC][9] = 66,
+ [0][0][RTW89_KCC][9] = 42,
+ [0][0][RTW89_ACMA][9] = 32,
+ [0][0][RTW89_CHILE][9] = 64,
+ [0][0][RTW89_UKRAINE][9] = 32,
+ [0][0][RTW89_MEXICO][9] = 66,
+ [0][0][RTW89_CN][9] = 32,
+ [0][0][RTW89_QATAR][9] = 32,
+ [0][0][RTW89_UK][9] = 32,
+ [0][0][RTW89_FCC][10] = 66,
+ [0][0][RTW89_ETSI][10] = 32,
+ [0][0][RTW89_MKK][10] = 42,
+ [0][0][RTW89_IC][10] = 66,
+ [0][0][RTW89_KCC][10] = 42,
+ [0][0][RTW89_ACMA][10] = 32,
+ [0][0][RTW89_CHILE][10] = 66,
+ [0][0][RTW89_UKRAINE][10] = 32,
+ [0][0][RTW89_MEXICO][10] = 66,
+ [0][0][RTW89_CN][10] = 32,
+ [0][0][RTW89_QATAR][10] = 32,
+ [0][0][RTW89_UK][10] = 32,
+ [0][0][RTW89_FCC][11] = 50,
+ [0][0][RTW89_ETSI][11] = 32,
+ [0][0][RTW89_MKK][11] = 42,
+ [0][0][RTW89_IC][11] = 50,
+ [0][0][RTW89_KCC][11] = 42,
+ [0][0][RTW89_ACMA][11] = 32,
+ [0][0][RTW89_CHILE][11] = 64,
+ [0][0][RTW89_UKRAINE][11] = 32,
+ [0][0][RTW89_MEXICO][11] = 50,
+ [0][0][RTW89_CN][11] = 32,
+ [0][0][RTW89_QATAR][11] = 32,
+ [0][0][RTW89_UK][11] = 32,
+ [0][0][RTW89_FCC][12] = 32,
+ [0][0][RTW89_ETSI][12] = 32,
+ [0][0][RTW89_MKK][12] = 42,
+ [0][0][RTW89_IC][12] = 32,
+ [0][0][RTW89_KCC][12] = 42,
+ [0][0][RTW89_ACMA][12] = 32,
+ [0][0][RTW89_CHILE][12] = 64,
+ [0][0][RTW89_UKRAINE][12] = 32,
+ [0][0][RTW89_MEXICO][12] = 32,
+ [0][0][RTW89_CN][12] = 32,
+ [0][0][RTW89_QATAR][12] = 32,
+ [0][0][RTW89_UK][12] = 32,
+ [0][0][RTW89_FCC][13] = 127,
+ [0][0][RTW89_ETSI][13] = 127,
+ [0][0][RTW89_MKK][13] = 127,
+ [0][0][RTW89_IC][13] = 127,
+ [0][0][RTW89_KCC][13] = 127,
+ [0][0][RTW89_ACMA][13] = 127,
+ [0][0][RTW89_CHILE][13] = 127,
+ [0][0][RTW89_UKRAINE][13] = 127,
+ [0][0][RTW89_MEXICO][13] = 127,
+ [0][0][RTW89_CN][13] = 127,
+ [0][0][RTW89_QATAR][13] = 127,
+ [0][0][RTW89_UK][13] = 127,
+ [0][1][RTW89_FCC][0] = 54,
+ [0][1][RTW89_ETSI][0] = 20,
+ [0][1][RTW89_MKK][0] = 32,
+ [0][1][RTW89_IC][0] = 54,
+ [0][1][RTW89_KCC][0] = 32,
+ [0][1][RTW89_ACMA][0] = 20,
+ [0][1][RTW89_CHILE][0] = 50,
+ [0][1][RTW89_UKRAINE][0] = 20,
+ [0][1][RTW89_MEXICO][0] = 54,
+ [0][1][RTW89_CN][0] = 20,
+ [0][1][RTW89_QATAR][0] = 20,
+ [0][1][RTW89_UK][0] = 20,
+ [0][1][RTW89_FCC][1] = 54,
+ [0][1][RTW89_ETSI][1] = 22,
+ [0][1][RTW89_MKK][1] = 32,
+ [0][1][RTW89_IC][1] = 54,
+ [0][1][RTW89_KCC][1] = 32,
+ [0][1][RTW89_ACMA][1] = 22,
+ [0][1][RTW89_CHILE][1] = 50,
+ [0][1][RTW89_UKRAINE][1] = 22,
+ [0][1][RTW89_MEXICO][1] = 54,
+ [0][1][RTW89_CN][1] = 22,
+ [0][1][RTW89_QATAR][1] = 22,
+ [0][1][RTW89_UK][1] = 22,
+ [0][1][RTW89_FCC][2] = 58,
+ [0][1][RTW89_ETSI][2] = 22,
+ [0][1][RTW89_MKK][2] = 32,
+ [0][1][RTW89_IC][2] = 58,
+ [0][1][RTW89_KCC][2] = 32,
+ [0][1][RTW89_ACMA][2] = 22,
+ [0][1][RTW89_CHILE][2] = 50,
+ [0][1][RTW89_UKRAINE][2] = 22,
+ [0][1][RTW89_MEXICO][2] = 58,
+ [0][1][RTW89_CN][2] = 22,
+ [0][1][RTW89_QATAR][2] = 22,
+ [0][1][RTW89_UK][2] = 22,
+ [0][1][RTW89_FCC][3] = 62,
+ [0][1][RTW89_ETSI][3] = 22,
+ [0][1][RTW89_MKK][3] = 32,
+ [0][1][RTW89_IC][3] = 62,
+ [0][1][RTW89_KCC][3] = 32,
+ [0][1][RTW89_ACMA][3] = 22,
+ [0][1][RTW89_CHILE][3] = 50,
+ [0][1][RTW89_UKRAINE][3] = 22,
+ [0][1][RTW89_MEXICO][3] = 62,
+ [0][1][RTW89_CN][3] = 22,
+ [0][1][RTW89_QATAR][3] = 22,
+ [0][1][RTW89_UK][3] = 22,
+ [0][1][RTW89_FCC][4] = 66,
+ [0][1][RTW89_ETSI][4] = 22,
+ [0][1][RTW89_MKK][4] = 32,
+ [0][1][RTW89_IC][4] = 66,
+ [0][1][RTW89_KCC][4] = 30,
+ [0][1][RTW89_ACMA][4] = 22,
+ [0][1][RTW89_CHILE][4] = 50,
+ [0][1][RTW89_UKRAINE][4] = 22,
+ [0][1][RTW89_MEXICO][4] = 66,
+ [0][1][RTW89_CN][4] = 22,
+ [0][1][RTW89_QATAR][4] = 22,
+ [0][1][RTW89_UK][4] = 22,
+ [0][1][RTW89_FCC][5] = 74,
+ [0][1][RTW89_ETSI][5] = 22,
+ [0][1][RTW89_MKK][5] = 32,
+ [0][1][RTW89_IC][5] = 74,
+ [0][1][RTW89_KCC][5] = 30,
+ [0][1][RTW89_ACMA][5] = 22,
+ [0][1][RTW89_CHILE][5] = 52,
+ [0][1][RTW89_UKRAINE][5] = 22,
+ [0][1][RTW89_MEXICO][5] = 74,
+ [0][1][RTW89_CN][5] = 22,
+ [0][1][RTW89_QATAR][5] = 22,
+ [0][1][RTW89_UK][5] = 22,
+ [0][1][RTW89_FCC][6] = 66,
+ [0][1][RTW89_ETSI][6] = 22,
+ [0][1][RTW89_MKK][6] = 30,
+ [0][1][RTW89_IC][6] = 66,
+ [0][1][RTW89_KCC][6] = 30,
+ [0][1][RTW89_ACMA][6] = 22,
+ [0][1][RTW89_CHILE][6] = 50,
+ [0][1][RTW89_UKRAINE][6] = 22,
+ [0][1][RTW89_MEXICO][6] = 66,
+ [0][1][RTW89_CN][6] = 22,
+ [0][1][RTW89_QATAR][6] = 22,
+ [0][1][RTW89_UK][6] = 22,
+ [0][1][RTW89_FCC][7] = 62,
+ [0][1][RTW89_ETSI][7] = 22,
+ [0][1][RTW89_MKK][7] = 32,
+ [0][1][RTW89_IC][7] = 62,
+ [0][1][RTW89_KCC][7] = 30,
+ [0][1][RTW89_ACMA][7] = 22,
+ [0][1][RTW89_CHILE][7] = 50,
+ [0][1][RTW89_UKRAINE][7] = 22,
+ [0][1][RTW89_MEXICO][7] = 62,
+ [0][1][RTW89_CN][7] = 22,
+ [0][1][RTW89_QATAR][7] = 22,
+ [0][1][RTW89_UK][7] = 22,
+ [0][1][RTW89_FCC][8] = 58,
+ [0][1][RTW89_ETSI][8] = 22,
+ [0][1][RTW89_MKK][8] = 32,
+ [0][1][RTW89_IC][8] = 58,
+ [0][1][RTW89_KCC][8] = 30,
+ [0][1][RTW89_ACMA][8] = 22,
+ [0][1][RTW89_CHILE][8] = 50,
+ [0][1][RTW89_UKRAINE][8] = 22,
+ [0][1][RTW89_MEXICO][8] = 58,
+ [0][1][RTW89_CN][8] = 22,
+ [0][1][RTW89_QATAR][8] = 22,
+ [0][1][RTW89_UK][8] = 22,
+ [0][1][RTW89_FCC][9] = 54,
+ [0][1][RTW89_ETSI][9] = 22,
+ [0][1][RTW89_MKK][9] = 32,
+ [0][1][RTW89_IC][9] = 54,
+ [0][1][RTW89_KCC][9] = 30,
+ [0][1][RTW89_ACMA][9] = 22,
+ [0][1][RTW89_CHILE][9] = 50,
+ [0][1][RTW89_UKRAINE][9] = 22,
+ [0][1][RTW89_MEXICO][9] = 54,
+ [0][1][RTW89_CN][9] = 22,
+ [0][1][RTW89_QATAR][9] = 22,
+ [0][1][RTW89_UK][9] = 22,
+ [0][1][RTW89_FCC][10] = 54,
+ [0][1][RTW89_ETSI][10] = 22,
+ [0][1][RTW89_MKK][10] = 32,
+ [0][1][RTW89_IC][10] = 54,
+ [0][1][RTW89_KCC][10] = 30,
+ [0][1][RTW89_ACMA][10] = 22,
+ [0][1][RTW89_CHILE][10] = 50,
+ [0][1][RTW89_UKRAINE][10] = 22,
+ [0][1][RTW89_MEXICO][10] = 54,
+ [0][1][RTW89_CN][10] = 22,
+ [0][1][RTW89_QATAR][10] = 22,
+ [0][1][RTW89_UK][10] = 22,
+ [0][1][RTW89_FCC][11] = 38,
+ [0][1][RTW89_ETSI][11] = 22,
+ [0][1][RTW89_MKK][11] = 32,
+ [0][1][RTW89_IC][11] = 38,
+ [0][1][RTW89_KCC][11] = 30,
+ [0][1][RTW89_ACMA][11] = 22,
+ [0][1][RTW89_CHILE][11] = 50,
+ [0][1][RTW89_UKRAINE][11] = 22,
+ [0][1][RTW89_MEXICO][11] = 38,
+ [0][1][RTW89_CN][11] = 22,
+ [0][1][RTW89_QATAR][11] = 22,
+ [0][1][RTW89_UK][11] = 22,
+ [0][1][RTW89_FCC][12] = 30,
+ [0][1][RTW89_ETSI][12] = 20,
+ [0][1][RTW89_MKK][12] = 30,
+ [0][1][RTW89_IC][12] = 30,
+ [0][1][RTW89_KCC][12] = 30,
+ [0][1][RTW89_ACMA][12] = 20,
+ [0][1][RTW89_CHILE][12] = 50,
+ [0][1][RTW89_UKRAINE][12] = 20,
+ [0][1][RTW89_MEXICO][12] = 30,
+ [0][1][RTW89_CN][12] = 20,
+ [0][1][RTW89_QATAR][12] = 20,
+ [0][1][RTW89_UK][12] = 20,
+ [0][1][RTW89_FCC][13] = 127,
+ [0][1][RTW89_ETSI][13] = 127,
+ [0][1][RTW89_MKK][13] = 127,
+ [0][1][RTW89_IC][13] = 127,
+ [0][1][RTW89_KCC][13] = 127,
+ [0][1][RTW89_ACMA][13] = 127,
+ [0][1][RTW89_CHILE][13] = 127,
+ [0][1][RTW89_UKRAINE][13] = 127,
+ [0][1][RTW89_MEXICO][13] = 127,
+ [0][1][RTW89_CN][13] = 127,
+ [0][1][RTW89_QATAR][13] = 127,
+ [0][1][RTW89_UK][13] = 127,
+ [1][0][RTW89_FCC][0] = 72,
+ [1][0][RTW89_ETSI][0] = 42,
+ [1][0][RTW89_MKK][0] = 52,
+ [1][0][RTW89_IC][0] = 72,
+ [1][0][RTW89_KCC][0] = 52,
+ [1][0][RTW89_ACMA][0] = 42,
+ [1][0][RTW89_CHILE][0] = 68,
+ [1][0][RTW89_UKRAINE][0] = 42,
+ [1][0][RTW89_MEXICO][0] = 72,
+ [1][0][RTW89_CN][0] = 42,
+ [1][0][RTW89_QATAR][0] = 42,
+ [1][0][RTW89_UK][0] = 42,
+ [1][0][RTW89_FCC][1] = 72,
+ [1][0][RTW89_ETSI][1] = 44,
+ [1][0][RTW89_MKK][1] = 52,
+ [1][0][RTW89_IC][1] = 72,
+ [1][0][RTW89_KCC][1] = 52,
+ [1][0][RTW89_ACMA][1] = 44,
+ [1][0][RTW89_CHILE][1] = 68,
+ [1][0][RTW89_UKRAINE][1] = 44,
+ [1][0][RTW89_MEXICO][1] = 72,
+ [1][0][RTW89_CN][1] = 44,
+ [1][0][RTW89_QATAR][1] = 44,
+ [1][0][RTW89_UK][1] = 44,
+ [1][0][RTW89_FCC][2] = 76,
+ [1][0][RTW89_ETSI][2] = 44,
+ [1][0][RTW89_MKK][2] = 52,
+ [1][0][RTW89_IC][2] = 76,
+ [1][0][RTW89_KCC][2] = 52,
+ [1][0][RTW89_ACMA][2] = 44,
+ [1][0][RTW89_CHILE][2] = 68,
+ [1][0][RTW89_UKRAINE][2] = 44,
+ [1][0][RTW89_MEXICO][2] = 76,
+ [1][0][RTW89_CN][2] = 44,
+ [1][0][RTW89_QATAR][2] = 44,
+ [1][0][RTW89_UK][2] = 44,
+ [1][0][RTW89_FCC][3] = 78,
+ [1][0][RTW89_ETSI][3] = 44,
+ [1][0][RTW89_MKK][3] = 52,
+ [1][0][RTW89_IC][3] = 78,
+ [1][0][RTW89_KCC][3] = 52,
+ [1][0][RTW89_ACMA][3] = 44,
+ [1][0][RTW89_CHILE][3] = 68,
+ [1][0][RTW89_UKRAINE][3] = 44,
+ [1][0][RTW89_MEXICO][3] = 78,
+ [1][0][RTW89_CN][3] = 44,
+ [1][0][RTW89_QATAR][3] = 44,
+ [1][0][RTW89_UK][3] = 44,
+ [1][0][RTW89_FCC][4] = 78,
+ [1][0][RTW89_ETSI][4] = 44,
+ [1][0][RTW89_MKK][4] = 52,
+ [1][0][RTW89_IC][4] = 78,
+ [1][0][RTW89_KCC][4] = 52,
+ [1][0][RTW89_ACMA][4] = 44,
+ [1][0][RTW89_CHILE][4] = 68,
+ [1][0][RTW89_UKRAINE][4] = 44,
+ [1][0][RTW89_MEXICO][4] = 78,
+ [1][0][RTW89_CN][4] = 44,
+ [1][0][RTW89_QATAR][4] = 44,
+ [1][0][RTW89_UK][4] = 44,
+ [1][0][RTW89_FCC][5] = 84,
+ [1][0][RTW89_ETSI][5] = 44,
+ [1][0][RTW89_MKK][5] = 52,
+ [1][0][RTW89_IC][5] = 84,
+ [1][0][RTW89_KCC][5] = 52,
+ [1][0][RTW89_ACMA][5] = 44,
+ [1][0][RTW89_CHILE][5] = 68,
+ [1][0][RTW89_UKRAINE][5] = 44,
+ [1][0][RTW89_MEXICO][5] = 84,
+ [1][0][RTW89_CN][5] = 44,
+ [1][0][RTW89_QATAR][5] = 44,
+ [1][0][RTW89_UK][5] = 44,
+ [1][0][RTW89_FCC][6] = 72,
+ [1][0][RTW89_ETSI][6] = 44,
+ [1][0][RTW89_MKK][6] = 52,
+ [1][0][RTW89_IC][6] = 72,
+ [1][0][RTW89_KCC][6] = 52,
+ [1][0][RTW89_ACMA][6] = 44,
+ [1][0][RTW89_CHILE][6] = 68,
+ [1][0][RTW89_UKRAINE][6] = 44,
+ [1][0][RTW89_MEXICO][6] = 72,
+ [1][0][RTW89_CN][6] = 44,
+ [1][0][RTW89_QATAR][6] = 44,
+ [1][0][RTW89_UK][6] = 44,
+ [1][0][RTW89_FCC][7] = 72,
+ [1][0][RTW89_ETSI][7] = 44,
+ [1][0][RTW89_MKK][7] = 52,
+ [1][0][RTW89_IC][7] = 72,
+ [1][0][RTW89_KCC][7] = 52,
+ [1][0][RTW89_ACMA][7] = 44,
+ [1][0][RTW89_CHILE][7] = 68,
+ [1][0][RTW89_UKRAINE][7] = 44,
+ [1][0][RTW89_MEXICO][7] = 72,
+ [1][0][RTW89_CN][7] = 44,
+ [1][0][RTW89_QATAR][7] = 44,
+ [1][0][RTW89_UK][7] = 44,
+ [1][0][RTW89_FCC][8] = 72,
+ [1][0][RTW89_ETSI][8] = 44,
+ [1][0][RTW89_MKK][8] = 52,
+ [1][0][RTW89_IC][8] = 72,
+ [1][0][RTW89_KCC][8] = 52,
+ [1][0][RTW89_ACMA][8] = 44,
+ [1][0][RTW89_CHILE][8] = 68,
+ [1][0][RTW89_UKRAINE][8] = 44,
+ [1][0][RTW89_MEXICO][8] = 72,
+ [1][0][RTW89_CN][8] = 44,
+ [1][0][RTW89_QATAR][8] = 44,
+ [1][0][RTW89_UK][8] = 44,
+ [1][0][RTW89_FCC][9] = 68,
+ [1][0][RTW89_ETSI][9] = 44,
+ [1][0][RTW89_MKK][9] = 52,
+ [1][0][RTW89_IC][9] = 68,
+ [1][0][RTW89_KCC][9] = 52,
+ [1][0][RTW89_ACMA][9] = 44,
+ [1][0][RTW89_CHILE][9] = 68,
+ [1][0][RTW89_UKRAINE][9] = 44,
+ [1][0][RTW89_MEXICO][9] = 68,
+ [1][0][RTW89_CN][9] = 44,
+ [1][0][RTW89_QATAR][9] = 44,
+ [1][0][RTW89_UK][9] = 44,
+ [1][0][RTW89_FCC][10] = 68,
+ [1][0][RTW89_ETSI][10] = 44,
+ [1][0][RTW89_MKK][10] = 52,
+ [1][0][RTW89_IC][10] = 68,
+ [1][0][RTW89_KCC][10] = 52,
+ [1][0][RTW89_ACMA][10] = 44,
+ [1][0][RTW89_CHILE][10] = 70,
+ [1][0][RTW89_UKRAINE][10] = 44,
+ [1][0][RTW89_MEXICO][10] = 68,
+ [1][0][RTW89_CN][10] = 44,
+ [1][0][RTW89_QATAR][10] = 44,
+ [1][0][RTW89_UK][10] = 44,
+ [1][0][RTW89_FCC][11] = 50,
+ [1][0][RTW89_ETSI][11] = 44,
+ [1][0][RTW89_MKK][11] = 52,
+ [1][0][RTW89_IC][11] = 50,
+ [1][0][RTW89_KCC][11] = 52,
+ [1][0][RTW89_ACMA][11] = 44,
+ [1][0][RTW89_CHILE][11] = 68,
+ [1][0][RTW89_UKRAINE][11] = 44,
+ [1][0][RTW89_MEXICO][11] = 50,
+ [1][0][RTW89_CN][11] = 44,
+ [1][0][RTW89_QATAR][11] = 44,
+ [1][0][RTW89_UK][11] = 44,
+ [1][0][RTW89_FCC][12] = 38,
+ [1][0][RTW89_ETSI][12] = 42,
+ [1][0][RTW89_MKK][12] = 52,
+ [1][0][RTW89_IC][12] = 38,
+ [1][0][RTW89_KCC][12] = 52,
+ [1][0][RTW89_ACMA][12] = 42,
+ [1][0][RTW89_CHILE][12] = 68,
+ [1][0][RTW89_UKRAINE][12] = 42,
+ [1][0][RTW89_MEXICO][12] = 38,
+ [1][0][RTW89_CN][12] = 42,
+ [1][0][RTW89_QATAR][12] = 42,
+ [1][0][RTW89_UK][12] = 42,
+ [1][0][RTW89_FCC][13] = 127,
+ [1][0][RTW89_ETSI][13] = 127,
+ [1][0][RTW89_MKK][13] = 127,
+ [1][0][RTW89_IC][13] = 127,
+ [1][0][RTW89_KCC][13] = 127,
+ [1][0][RTW89_ACMA][13] = 127,
+ [1][0][RTW89_CHILE][13] = 127,
+ [1][0][RTW89_UKRAINE][13] = 127,
+ [1][0][RTW89_MEXICO][13] = 127,
+ [1][0][RTW89_CN][13] = 127,
+ [1][0][RTW89_QATAR][13] = 127,
+ [1][0][RTW89_UK][13] = 127,
+ [1][1][RTW89_FCC][0] = 54,
+ [1][1][RTW89_ETSI][0] = 32,
+ [1][1][RTW89_MKK][0] = 40,
+ [1][1][RTW89_IC][0] = 54,
+ [1][1][RTW89_KCC][0] = 40,
+ [1][1][RTW89_ACMA][0] = 32,
+ [1][1][RTW89_CHILE][0] = 54,
+ [1][1][RTW89_UKRAINE][0] = 32,
+ [1][1][RTW89_MEXICO][0] = 54,
+ [1][1][RTW89_CN][0] = 32,
+ [1][1][RTW89_QATAR][0] = 32,
+ [1][1][RTW89_UK][0] = 32,
+ [1][1][RTW89_FCC][1] = 54,
+ [1][1][RTW89_ETSI][1] = 32,
+ [1][1][RTW89_MKK][1] = 40,
+ [1][1][RTW89_IC][1] = 54,
+ [1][1][RTW89_KCC][1] = 40,
+ [1][1][RTW89_ACMA][1] = 32,
+ [1][1][RTW89_CHILE][1] = 54,
+ [1][1][RTW89_UKRAINE][1] = 32,
+ [1][1][RTW89_MEXICO][1] = 54,
+ [1][1][RTW89_CN][1] = 32,
+ [1][1][RTW89_QATAR][1] = 32,
+ [1][1][RTW89_UK][1] = 32,
+ [1][1][RTW89_FCC][2] = 58,
+ [1][1][RTW89_ETSI][2] = 32,
+ [1][1][RTW89_MKK][2] = 40,
+ [1][1][RTW89_IC][2] = 58,
+ [1][1][RTW89_KCC][2] = 40,
+ [1][1][RTW89_ACMA][2] = 32,
+ [1][1][RTW89_CHILE][2] = 54,
+ [1][1][RTW89_UKRAINE][2] = 32,
+ [1][1][RTW89_MEXICO][2] = 58,
+ [1][1][RTW89_CN][2] = 32,
+ [1][1][RTW89_QATAR][2] = 32,
+ [1][1][RTW89_UK][2] = 32,
+ [1][1][RTW89_FCC][3] = 62,
+ [1][1][RTW89_ETSI][3] = 32,
+ [1][1][RTW89_MKK][3] = 40,
+ [1][1][RTW89_IC][3] = 62,
+ [1][1][RTW89_KCC][3] = 40,
+ [1][1][RTW89_ACMA][3] = 32,
+ [1][1][RTW89_CHILE][3] = 54,
+ [1][1][RTW89_UKRAINE][3] = 32,
+ [1][1][RTW89_MEXICO][3] = 62,
+ [1][1][RTW89_CN][3] = 32,
+ [1][1][RTW89_QATAR][3] = 32,
+ [1][1][RTW89_UK][3] = 32,
+ [1][1][RTW89_FCC][4] = 66,
+ [1][1][RTW89_ETSI][4] = 32,
+ [1][1][RTW89_MKK][4] = 40,
+ [1][1][RTW89_IC][4] = 66,
+ [1][1][RTW89_KCC][4] = 40,
+ [1][1][RTW89_ACMA][4] = 32,
+ [1][1][RTW89_CHILE][4] = 54,
+ [1][1][RTW89_UKRAINE][4] = 32,
+ [1][1][RTW89_MEXICO][4] = 66,
+ [1][1][RTW89_CN][4] = 32,
+ [1][1][RTW89_QATAR][4] = 32,
+ [1][1][RTW89_UK][4] = 32,
+ [1][1][RTW89_FCC][5] = 74,
+ [1][1][RTW89_ETSI][5] = 32,
+ [1][1][RTW89_MKK][5] = 40,
+ [1][1][RTW89_IC][5] = 74,
+ [1][1][RTW89_KCC][5] = 40,
+ [1][1][RTW89_ACMA][5] = 32,
+ [1][1][RTW89_CHILE][5] = 54,
+ [1][1][RTW89_UKRAINE][5] = 32,
+ [1][1][RTW89_MEXICO][5] = 74,
+ [1][1][RTW89_CN][5] = 32,
+ [1][1][RTW89_QATAR][5] = 32,
+ [1][1][RTW89_UK][5] = 32,
+ [1][1][RTW89_FCC][6] = 66,
+ [1][1][RTW89_ETSI][6] = 32,
+ [1][1][RTW89_MKK][6] = 40,
+ [1][1][RTW89_IC][6] = 66,
+ [1][1][RTW89_KCC][6] = 40,
+ [1][1][RTW89_ACMA][6] = 32,
+ [1][1][RTW89_CHILE][6] = 54,
+ [1][1][RTW89_UKRAINE][6] = 32,
+ [1][1][RTW89_MEXICO][6] = 66,
+ [1][1][RTW89_CN][6] = 32,
+ [1][1][RTW89_QATAR][6] = 32,
+ [1][1][RTW89_UK][6] = 32,
+ [1][1][RTW89_FCC][7] = 62,
+ [1][1][RTW89_ETSI][7] = 32,
+ [1][1][RTW89_MKK][7] = 40,
+ [1][1][RTW89_IC][7] = 62,
+ [1][1][RTW89_KCC][7] = 40,
+ [1][1][RTW89_ACMA][7] = 32,
+ [1][1][RTW89_CHILE][7] = 54,
+ [1][1][RTW89_UKRAINE][7] = 32,
+ [1][1][RTW89_MEXICO][7] = 62,
+ [1][1][RTW89_CN][7] = 32,
+ [1][1][RTW89_QATAR][7] = 32,
+ [1][1][RTW89_UK][7] = 32,
+ [1][1][RTW89_FCC][8] = 58,
+ [1][1][RTW89_ETSI][8] = 32,
+ [1][1][RTW89_MKK][8] = 40,
+ [1][1][RTW89_IC][8] = 58,
+ [1][1][RTW89_KCC][8] = 40,
+ [1][1][RTW89_ACMA][8] = 32,
+ [1][1][RTW89_CHILE][8] = 54,
+ [1][1][RTW89_UKRAINE][8] = 32,
+ [1][1][RTW89_MEXICO][8] = 58,
+ [1][1][RTW89_CN][8] = 32,
+ [1][1][RTW89_QATAR][8] = 32,
+ [1][1][RTW89_UK][8] = 32,
+ [1][1][RTW89_FCC][9] = 54,
+ [1][1][RTW89_ETSI][9] = 32,
+ [1][1][RTW89_MKK][9] = 40,
+ [1][1][RTW89_IC][9] = 54,
+ [1][1][RTW89_KCC][9] = 40,
+ [1][1][RTW89_ACMA][9] = 32,
+ [1][1][RTW89_CHILE][9] = 54,
+ [1][1][RTW89_UKRAINE][9] = 32,
+ [1][1][RTW89_MEXICO][9] = 54,
+ [1][1][RTW89_CN][9] = 32,
+ [1][1][RTW89_QATAR][9] = 32,
+ [1][1][RTW89_UK][9] = 32,
+ [1][1][RTW89_FCC][10] = 54,
+ [1][1][RTW89_ETSI][10] = 32,
+ [1][1][RTW89_MKK][10] = 40,
+ [1][1][RTW89_IC][10] = 54,
+ [1][1][RTW89_KCC][10] = 40,
+ [1][1][RTW89_ACMA][10] = 32,
+ [1][1][RTW89_CHILE][10] = 54,
+ [1][1][RTW89_UKRAINE][10] = 32,
+ [1][1][RTW89_MEXICO][10] = 54,
+ [1][1][RTW89_CN][10] = 32,
+ [1][1][RTW89_QATAR][10] = 32,
+ [1][1][RTW89_UK][10] = 32,
+ [1][1][RTW89_FCC][11] = 38,
+ [1][1][RTW89_ETSI][11] = 32,
+ [1][1][RTW89_MKK][11] = 40,
+ [1][1][RTW89_IC][11] = 38,
+ [1][1][RTW89_KCC][11] = 40,
+ [1][1][RTW89_ACMA][11] = 32,
+ [1][1][RTW89_CHILE][11] = 54,
+ [1][1][RTW89_UKRAINE][11] = 32,
+ [1][1][RTW89_MEXICO][11] = 38,
+ [1][1][RTW89_CN][11] = 32,
+ [1][1][RTW89_QATAR][11] = 32,
+ [1][1][RTW89_UK][11] = 32,
+ [1][1][RTW89_FCC][12] = 32,
+ [1][1][RTW89_ETSI][12] = 32,
+ [1][1][RTW89_MKK][12] = 40,
+ [1][1][RTW89_IC][12] = 32,
+ [1][1][RTW89_KCC][12] = 40,
+ [1][1][RTW89_ACMA][12] = 32,
+ [1][1][RTW89_CHILE][12] = 54,
+ [1][1][RTW89_UKRAINE][12] = 32,
+ [1][1][RTW89_MEXICO][12] = 32,
+ [1][1][RTW89_CN][12] = 32,
+ [1][1][RTW89_QATAR][12] = 32,
+ [1][1][RTW89_UK][12] = 32,
+ [1][1][RTW89_FCC][13] = 127,
+ [1][1][RTW89_ETSI][13] = 127,
+ [1][1][RTW89_MKK][13] = 127,
+ [1][1][RTW89_IC][13] = 127,
+ [1][1][RTW89_KCC][13] = 127,
+ [1][1][RTW89_ACMA][13] = 127,
+ [1][1][RTW89_CHILE][13] = 127,
+ [1][1][RTW89_UKRAINE][13] = 127,
+ [1][1][RTW89_MEXICO][13] = 127,
+ [1][1][RTW89_CN][13] = 127,
+ [1][1][RTW89_QATAR][13] = 127,
+ [1][1][RTW89_UK][13] = 127,
+ [2][0][RTW89_FCC][0] = 72,
+ [2][0][RTW89_ETSI][0] = 56,
+ [2][0][RTW89_MKK][0] = 64,
+ [2][0][RTW89_IC][0] = 72,
+ [2][0][RTW89_KCC][0] = 66,
+ [2][0][RTW89_ACMA][0] = 56,
+ [2][0][RTW89_CHILE][0] = 68,
+ [2][0][RTW89_UKRAINE][0] = 56,
+ [2][0][RTW89_MEXICO][0] = 72,
+ [2][0][RTW89_CN][0] = 56,
+ [2][0][RTW89_QATAR][0] = 56,
+ [2][0][RTW89_UK][0] = 56,
+ [2][0][RTW89_FCC][1] = 72,
+ [2][0][RTW89_ETSI][1] = 56,
+ [2][0][RTW89_MKK][1] = 64,
+ [2][0][RTW89_IC][1] = 72,
+ [2][0][RTW89_KCC][1] = 66,
+ [2][0][RTW89_ACMA][1] = 56,
+ [2][0][RTW89_CHILE][1] = 68,
+ [2][0][RTW89_UKRAINE][1] = 56,
+ [2][0][RTW89_MEXICO][1] = 72,
+ [2][0][RTW89_CN][1] = 56,
+ [2][0][RTW89_QATAR][1] = 56,
+ [2][0][RTW89_UK][1] = 56,
+ [2][0][RTW89_FCC][2] = 74,
+ [2][0][RTW89_ETSI][2] = 56,
+ [2][0][RTW89_MKK][2] = 64,
+ [2][0][RTW89_IC][2] = 74,
+ [2][0][RTW89_KCC][2] = 66,
+ [2][0][RTW89_ACMA][2] = 56,
+ [2][0][RTW89_CHILE][2] = 68,
+ [2][0][RTW89_UKRAINE][2] = 56,
+ [2][0][RTW89_MEXICO][2] = 74,
+ [2][0][RTW89_CN][2] = 56,
+ [2][0][RTW89_QATAR][2] = 56,
+ [2][0][RTW89_UK][2] = 56,
+ [2][0][RTW89_FCC][3] = 74,
+ [2][0][RTW89_ETSI][3] = 56,
+ [2][0][RTW89_MKK][3] = 64,
+ [2][0][RTW89_IC][3] = 74,
+ [2][0][RTW89_KCC][3] = 66,
+ [2][0][RTW89_ACMA][3] = 56,
+ [2][0][RTW89_CHILE][3] = 68,
+ [2][0][RTW89_UKRAINE][3] = 56,
+ [2][0][RTW89_MEXICO][3] = 74,
+ [2][0][RTW89_CN][3] = 56,
+ [2][0][RTW89_QATAR][3] = 56,
+ [2][0][RTW89_UK][3] = 56,
+ [2][0][RTW89_FCC][4] = 74,
+ [2][0][RTW89_ETSI][4] = 56,
+ [2][0][RTW89_MKK][4] = 64,
+ [2][0][RTW89_IC][4] = 74,
+ [2][0][RTW89_KCC][4] = 66,
+ [2][0][RTW89_ACMA][4] = 56,
+ [2][0][RTW89_CHILE][4] = 68,
+ [2][0][RTW89_UKRAINE][4] = 56,
+ [2][0][RTW89_MEXICO][4] = 74,
+ [2][0][RTW89_CN][4] = 56,
+ [2][0][RTW89_QATAR][4] = 56,
+ [2][0][RTW89_UK][4] = 56,
+ [2][0][RTW89_FCC][5] = 84,
+ [2][0][RTW89_ETSI][5] = 56,
+ [2][0][RTW89_MKK][5] = 64,
+ [2][0][RTW89_IC][5] = 84,
+ [2][0][RTW89_KCC][5] = 66,
+ [2][0][RTW89_ACMA][5] = 56,
+ [2][0][RTW89_CHILE][5] = 70,
+ [2][0][RTW89_UKRAINE][5] = 56,
+ [2][0][RTW89_MEXICO][5] = 84,
+ [2][0][RTW89_CN][5] = 56,
+ [2][0][RTW89_QATAR][5] = 56,
+ [2][0][RTW89_UK][5] = 56,
+ [2][0][RTW89_FCC][6] = 70,
+ [2][0][RTW89_ETSI][6] = 56,
+ [2][0][RTW89_MKK][6] = 64,
+ [2][0][RTW89_IC][6] = 70,
+ [2][0][RTW89_KCC][6] = 66,
+ [2][0][RTW89_ACMA][6] = 56,
+ [2][0][RTW89_CHILE][6] = 68,
+ [2][0][RTW89_UKRAINE][6] = 56,
+ [2][0][RTW89_MEXICO][6] = 70,
+ [2][0][RTW89_CN][6] = 56,
+ [2][0][RTW89_QATAR][6] = 56,
+ [2][0][RTW89_UK][6] = 56,
+ [2][0][RTW89_FCC][7] = 70,
+ [2][0][RTW89_ETSI][7] = 56,
+ [2][0][RTW89_MKK][7] = 64,
+ [2][0][RTW89_IC][7] = 70,
+ [2][0][RTW89_KCC][7] = 66,
+ [2][0][RTW89_ACMA][7] = 56,
+ [2][0][RTW89_CHILE][7] = 68,
+ [2][0][RTW89_UKRAINE][7] = 56,
+ [2][0][RTW89_MEXICO][7] = 70,
+ [2][0][RTW89_CN][7] = 56,
+ [2][0][RTW89_QATAR][7] = 56,
+ [2][0][RTW89_UK][7] = 56,
+ [2][0][RTW89_FCC][8] = 70,
+ [2][0][RTW89_ETSI][8] = 56,
+ [2][0][RTW89_MKK][8] = 64,
+ [2][0][RTW89_IC][8] = 70,
+ [2][0][RTW89_KCC][8] = 66,
+ [2][0][RTW89_ACMA][8] = 56,
+ [2][0][RTW89_CHILE][8] = 68,
+ [2][0][RTW89_UKRAINE][8] = 56,
+ [2][0][RTW89_MEXICO][8] = 70,
+ [2][0][RTW89_CN][8] = 56,
+ [2][0][RTW89_QATAR][8] = 56,
+ [2][0][RTW89_UK][8] = 56,
+ [2][0][RTW89_FCC][9] = 68,
+ [2][0][RTW89_ETSI][9] = 56,
+ [2][0][RTW89_MKK][9] = 64,
+ [2][0][RTW89_IC][9] = 68,
+ [2][0][RTW89_KCC][9] = 66,
+ [2][0][RTW89_ACMA][9] = 56,
+ [2][0][RTW89_CHILE][9] = 68,
+ [2][0][RTW89_UKRAINE][9] = 56,
+ [2][0][RTW89_MEXICO][9] = 68,
+ [2][0][RTW89_CN][9] = 56,
+ [2][0][RTW89_QATAR][9] = 56,
+ [2][0][RTW89_UK][9] = 56,
+ [2][0][RTW89_FCC][10] = 68,
+ [2][0][RTW89_ETSI][10] = 56,
+ [2][0][RTW89_MKK][10] = 64,
+ [2][0][RTW89_IC][10] = 68,
+ [2][0][RTW89_KCC][10] = 66,
+ [2][0][RTW89_ACMA][10] = 56,
+ [2][0][RTW89_CHILE][10] = 68,
+ [2][0][RTW89_UKRAINE][10] = 56,
+ [2][0][RTW89_MEXICO][10] = 68,
+ [2][0][RTW89_CN][10] = 56,
+ [2][0][RTW89_QATAR][10] = 56,
+ [2][0][RTW89_UK][10] = 56,
+ [2][0][RTW89_FCC][11] = 50,
+ [2][0][RTW89_ETSI][11] = 56,
+ [2][0][RTW89_MKK][11] = 64,
+ [2][0][RTW89_IC][11] = 50,
+ [2][0][RTW89_KCC][11] = 66,
+ [2][0][RTW89_ACMA][11] = 56,
+ [2][0][RTW89_CHILE][11] = 68,
+ [2][0][RTW89_UKRAINE][11] = 56,
+ [2][0][RTW89_MEXICO][11] = 50,
+ [2][0][RTW89_CN][11] = 56,
+ [2][0][RTW89_QATAR][11] = 56,
+ [2][0][RTW89_UK][11] = 56,
+ [2][0][RTW89_FCC][12] = 46,
+ [2][0][RTW89_ETSI][12] = 56,
+ [2][0][RTW89_MKK][12] = 64,
+ [2][0][RTW89_IC][12] = 46,
+ [2][0][RTW89_KCC][12] = 66,
+ [2][0][RTW89_ACMA][12] = 56,
+ [2][0][RTW89_CHILE][12] = 68,
+ [2][0][RTW89_UKRAINE][12] = 56,
+ [2][0][RTW89_MEXICO][12] = 46,
+ [2][0][RTW89_CN][12] = 56,
+ [2][0][RTW89_QATAR][12] = 56,
+ [2][0][RTW89_UK][12] = 56,
+ [2][0][RTW89_FCC][13] = 127,
+ [2][0][RTW89_ETSI][13] = 127,
+ [2][0][RTW89_MKK][13] = 127,
+ [2][0][RTW89_IC][13] = 127,
+ [2][0][RTW89_KCC][13] = 127,
+ [2][0][RTW89_ACMA][13] = 127,
+ [2][0][RTW89_CHILE][13] = 127,
+ [2][0][RTW89_UKRAINE][13] = 127,
+ [2][0][RTW89_MEXICO][13] = 127,
+ [2][0][RTW89_CN][13] = 127,
+ [2][0][RTW89_QATAR][13] = 127,
+ [2][0][RTW89_UK][13] = 127,
+ [2][1][RTW89_FCC][0] = 54,
+ [2][1][RTW89_ETSI][0] = 44,
+ [2][1][RTW89_MKK][0] = 52,
+ [2][1][RTW89_IC][0] = 54,
+ [2][1][RTW89_KCC][0] = 54,
+ [2][1][RTW89_ACMA][0] = 44,
+ [2][1][RTW89_CHILE][0] = 58,
+ [2][1][RTW89_UKRAINE][0] = 44,
+ [2][1][RTW89_MEXICO][0] = 54,
+ [2][1][RTW89_CN][0] = 44,
+ [2][1][RTW89_QATAR][0] = 44,
+ [2][1][RTW89_UK][0] = 44,
+ [2][1][RTW89_FCC][1] = 54,
+ [2][1][RTW89_ETSI][1] = 44,
+ [2][1][RTW89_MKK][1] = 52,
+ [2][1][RTW89_IC][1] = 54,
+ [2][1][RTW89_KCC][1] = 54,
+ [2][1][RTW89_ACMA][1] = 44,
+ [2][1][RTW89_CHILE][1] = 56,
+ [2][1][RTW89_UKRAINE][1] = 44,
+ [2][1][RTW89_MEXICO][1] = 54,
+ [2][1][RTW89_CN][1] = 44,
+ [2][1][RTW89_QATAR][1] = 44,
+ [2][1][RTW89_UK][1] = 44,
+ [2][1][RTW89_FCC][2] = 58,
+ [2][1][RTW89_ETSI][2] = 44,
+ [2][1][RTW89_MKK][2] = 52,
+ [2][1][RTW89_IC][2] = 58,
+ [2][1][RTW89_KCC][2] = 54,
+ [2][1][RTW89_ACMA][2] = 44,
+ [2][1][RTW89_CHILE][2] = 56,
+ [2][1][RTW89_UKRAINE][2] = 44,
+ [2][1][RTW89_MEXICO][2] = 58,
+ [2][1][RTW89_CN][2] = 44,
+ [2][1][RTW89_QATAR][2] = 44,
+ [2][1][RTW89_UK][2] = 44,
+ [2][1][RTW89_FCC][3] = 62,
+ [2][1][RTW89_ETSI][3] = 44,
+ [2][1][RTW89_MKK][3] = 52,
+ [2][1][RTW89_IC][3] = 62,
+ [2][1][RTW89_KCC][3] = 54,
+ [2][1][RTW89_ACMA][3] = 44,
+ [2][1][RTW89_CHILE][3] = 56,
+ [2][1][RTW89_UKRAINE][3] = 44,
+ [2][1][RTW89_MEXICO][3] = 62,
+ [2][1][RTW89_CN][3] = 44,
+ [2][1][RTW89_QATAR][3] = 44,
+ [2][1][RTW89_UK][3] = 44,
+ [2][1][RTW89_FCC][4] = 64,
+ [2][1][RTW89_ETSI][4] = 44,
+ [2][1][RTW89_MKK][4] = 52,
+ [2][1][RTW89_IC][4] = 64,
+ [2][1][RTW89_KCC][4] = 52,
+ [2][1][RTW89_ACMA][4] = 44,
+ [2][1][RTW89_CHILE][4] = 56,
+ [2][1][RTW89_UKRAINE][4] = 44,
+ [2][1][RTW89_MEXICO][4] = 64,
+ [2][1][RTW89_CN][4] = 44,
+ [2][1][RTW89_QATAR][4] = 44,
+ [2][1][RTW89_UK][4] = 44,
+ [2][1][RTW89_FCC][5] = 80,
+ [2][1][RTW89_ETSI][5] = 44,
+ [2][1][RTW89_MKK][5] = 52,
+ [2][1][RTW89_IC][5] = 80,
+ [2][1][RTW89_KCC][5] = 52,
+ [2][1][RTW89_ACMA][5] = 44,
+ [2][1][RTW89_CHILE][5] = 56,
+ [2][1][RTW89_UKRAINE][5] = 44,
+ [2][1][RTW89_MEXICO][5] = 80,
+ [2][1][RTW89_CN][5] = 44,
+ [2][1][RTW89_QATAR][5] = 44,
+ [2][1][RTW89_UK][5] = 44,
+ [2][1][RTW89_FCC][6] = 62,
+ [2][1][RTW89_ETSI][6] = 44,
+ [2][1][RTW89_MKK][6] = 52,
+ [2][1][RTW89_IC][6] = 62,
+ [2][1][RTW89_KCC][6] = 52,
+ [2][1][RTW89_ACMA][6] = 44,
+ [2][1][RTW89_CHILE][6] = 56,
+ [2][1][RTW89_UKRAINE][6] = 44,
+ [2][1][RTW89_MEXICO][6] = 62,
+ [2][1][RTW89_CN][6] = 44,
+ [2][1][RTW89_QATAR][6] = 44,
+ [2][1][RTW89_UK][6] = 44,
+ [2][1][RTW89_FCC][7] = 62,
+ [2][1][RTW89_ETSI][7] = 44,
+ [2][1][RTW89_MKK][7] = 52,
+ [2][1][RTW89_IC][7] = 62,
+ [2][1][RTW89_KCC][7] = 52,
+ [2][1][RTW89_ACMA][7] = 44,
+ [2][1][RTW89_CHILE][7] = 56,
+ [2][1][RTW89_UKRAINE][7] = 44,
+ [2][1][RTW89_MEXICO][7] = 62,
+ [2][1][RTW89_CN][7] = 44,
+ [2][1][RTW89_QATAR][7] = 44,
+ [2][1][RTW89_UK][7] = 44,
+ [2][1][RTW89_FCC][8] = 58,
+ [2][1][RTW89_ETSI][8] = 44,
+ [2][1][RTW89_MKK][8] = 52,
+ [2][1][RTW89_IC][8] = 58,
+ [2][1][RTW89_KCC][8] = 52,
+ [2][1][RTW89_ACMA][8] = 44,
+ [2][1][RTW89_CHILE][8] = 56,
+ [2][1][RTW89_UKRAINE][8] = 44,
+ [2][1][RTW89_MEXICO][8] = 58,
+ [2][1][RTW89_CN][8] = 44,
+ [2][1][RTW89_QATAR][8] = 44,
+ [2][1][RTW89_UK][8] = 44,
+ [2][1][RTW89_FCC][9] = 54,
+ [2][1][RTW89_ETSI][9] = 44,
+ [2][1][RTW89_MKK][9] = 52,
+ [2][1][RTW89_IC][9] = 54,
+ [2][1][RTW89_KCC][9] = 54,
+ [2][1][RTW89_ACMA][9] = 44,
+ [2][1][RTW89_CHILE][9] = 56,
+ [2][1][RTW89_UKRAINE][9] = 44,
+ [2][1][RTW89_MEXICO][9] = 54,
+ [2][1][RTW89_CN][9] = 44,
+ [2][1][RTW89_QATAR][9] = 44,
+ [2][1][RTW89_UK][9] = 44,
+ [2][1][RTW89_FCC][10] = 54,
+ [2][1][RTW89_ETSI][10] = 44,
+ [2][1][RTW89_MKK][10] = 52,
+ [2][1][RTW89_IC][10] = 54,
+ [2][1][RTW89_KCC][10] = 54,
+ [2][1][RTW89_ACMA][10] = 44,
+ [2][1][RTW89_CHILE][10] = 56,
+ [2][1][RTW89_UKRAINE][10] = 44,
+ [2][1][RTW89_MEXICO][10] = 54,
+ [2][1][RTW89_CN][10] = 44,
+ [2][1][RTW89_QATAR][10] = 44,
+ [2][1][RTW89_UK][10] = 44,
+ [2][1][RTW89_FCC][11] = 38,
+ [2][1][RTW89_ETSI][11] = 44,
+ [2][1][RTW89_MKK][11] = 52,
+ [2][1][RTW89_IC][11] = 38,
+ [2][1][RTW89_KCC][11] = 54,
+ [2][1][RTW89_ACMA][11] = 44,
+ [2][1][RTW89_CHILE][11] = 56,
+ [2][1][RTW89_UKRAINE][11] = 44,
+ [2][1][RTW89_MEXICO][11] = 38,
+ [2][1][RTW89_CN][11] = 44,
+ [2][1][RTW89_QATAR][11] = 44,
+ [2][1][RTW89_UK][11] = 44,
+ [2][1][RTW89_FCC][12] = 34,
+ [2][1][RTW89_ETSI][12] = 42,
+ [2][1][RTW89_MKK][12] = 52,
+ [2][1][RTW89_IC][12] = 34,
+ [2][1][RTW89_KCC][12] = 54,
+ [2][1][RTW89_ACMA][12] = 42,
+ [2][1][RTW89_CHILE][12] = 56,
+ [2][1][RTW89_UKRAINE][12] = 42,
+ [2][1][RTW89_MEXICO][12] = 34,
+ [2][1][RTW89_CN][12] = 42,
+ [2][1][RTW89_QATAR][12] = 42,
+ [2][1][RTW89_UK][12] = 42,
+ [2][1][RTW89_FCC][13] = 127,
+ [2][1][RTW89_ETSI][13] = 127,
+ [2][1][RTW89_MKK][13] = 127,
+ [2][1][RTW89_IC][13] = 127,
+ [2][1][RTW89_KCC][13] = 127,
+ [2][1][RTW89_ACMA][13] = 127,
+ [2][1][RTW89_CHILE][13] = 127,
+ [2][1][RTW89_UKRAINE][13] = 127,
+ [2][1][RTW89_MEXICO][13] = 127,
+ [2][1][RTW89_CN][13] = 127,
+ [2][1][RTW89_QATAR][13] = 127,
+ [2][1][RTW89_UK][13] = 127,
+};
+
+const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = 24,
+ [0][0][RTW89_WW][2] = 24,
+ [0][0][RTW89_WW][4] = 24,
+ [0][0][RTW89_WW][6] = 12,
+ [0][0][RTW89_WW][8] = 24,
+ [0][0][RTW89_WW][10] = 24,
+ [0][0][RTW89_WW][12] = 24,
+ [0][0][RTW89_WW][14] = 24,
+ [0][0][RTW89_WW][15] = 24,
+ [0][0][RTW89_WW][17] = 24,
+ [0][0][RTW89_WW][19] = 24,
+ [0][0][RTW89_WW][21] = 24,
+ [0][0][RTW89_WW][23] = 24,
+ [0][0][RTW89_WW][25] = 24,
+ [0][0][RTW89_WW][27] = 24,
+ [0][0][RTW89_WW][29] = 24,
+ [0][0][RTW89_WW][31] = 24,
+ [0][0][RTW89_WW][33] = 24,
+ [0][0][RTW89_WW][35] = 24,
+ [0][0][RTW89_WW][37] = 44,
+ [0][0][RTW89_WW][38] = 26,
+ [0][0][RTW89_WW][40] = 26,
+ [0][0][RTW89_WW][42] = 26,
+ [0][0][RTW89_WW][44] = 26,
+ [0][0][RTW89_WW][46] = 26,
+ [0][0][RTW89_WW][48] = 32,
+ [0][0][RTW89_WW][50] = 32,
+ [0][0][RTW89_WW][52] = 32,
+ [0][1][RTW89_WW][0] = 0,
+ [0][1][RTW89_WW][2] = 4,
+ [0][1][RTW89_WW][4] = 0,
+ [0][1][RTW89_WW][6] = 0,
+ [0][1][RTW89_WW][8] = 12,
+ [0][1][RTW89_WW][10] = 12,
+ [0][1][RTW89_WW][12] = 12,
+ [0][1][RTW89_WW][14] = 12,
+ [0][1][RTW89_WW][15] = 12,
+ [0][1][RTW89_WW][17] = 12,
+ [0][1][RTW89_WW][19] = 12,
+ [0][1][RTW89_WW][21] = 12,
+ [0][1][RTW89_WW][23] = 12,
+ [0][1][RTW89_WW][25] = 12,
+ [0][1][RTW89_WW][27] = 12,
+ [0][1][RTW89_WW][29] = 12,
+ [0][1][RTW89_WW][31] = 12,
+ [0][1][RTW89_WW][33] = 12,
+ [0][1][RTW89_WW][35] = 12,
+ [0][1][RTW89_WW][37] = 30,
+ [0][1][RTW89_WW][38] = 14,
+ [0][1][RTW89_WW][40] = 14,
+ [0][1][RTW89_WW][42] = 14,
+ [0][1][RTW89_WW][44] = 14,
+ [0][1][RTW89_WW][46] = 14,
+ [0][1][RTW89_WW][48] = 20,
+ [0][1][RTW89_WW][50] = 20,
+ [0][1][RTW89_WW][52] = 20,
+ [1][0][RTW89_WW][0] = 34,
+ [1][0][RTW89_WW][2] = 34,
+ [1][0][RTW89_WW][4] = 34,
+ [1][0][RTW89_WW][6] = 26,
+ [1][0][RTW89_WW][8] = 34,
+ [1][0][RTW89_WW][10] = 34,
+ [1][0][RTW89_WW][12] = 34,
+ [1][0][RTW89_WW][14] = 34,
+ [1][0][RTW89_WW][15] = 34,
+ [1][0][RTW89_WW][17] = 34,
+ [1][0][RTW89_WW][19] = 34,
+ [1][0][RTW89_WW][21] = 34,
+ [1][0][RTW89_WW][23] = 34,
+ [1][0][RTW89_WW][25] = 34,
+ [1][0][RTW89_WW][27] = 34,
+ [1][0][RTW89_WW][29] = 34,
+ [1][0][RTW89_WW][31] = 34,
+ [1][0][RTW89_WW][33] = 34,
+ [1][0][RTW89_WW][35] = 34,
+ [1][0][RTW89_WW][37] = 52,
+ [1][0][RTW89_WW][38] = 28,
+ [1][0][RTW89_WW][40] = 28,
+ [1][0][RTW89_WW][42] = 28,
+ [1][0][RTW89_WW][44] = 28,
+ [1][0][RTW89_WW][46] = 28,
+ [1][0][RTW89_WW][48] = 44,
+ [1][0][RTW89_WW][50] = 44,
+ [1][0][RTW89_WW][52] = 44,
+ [1][1][RTW89_WW][0] = 10,
+ [1][1][RTW89_WW][2] = 14,
+ [1][1][RTW89_WW][4] = 10,
+ [1][1][RTW89_WW][6] = 10,
+ [1][1][RTW89_WW][8] = 20,
+ [1][1][RTW89_WW][10] = 20,
+ [1][1][RTW89_WW][12] = 22,
+ [1][1][RTW89_WW][14] = 22,
+ [1][1][RTW89_WW][15] = 22,
+ [1][1][RTW89_WW][17] = 22,
+ [1][1][RTW89_WW][19] = 22,
+ [1][1][RTW89_WW][21] = 22,
+ [1][1][RTW89_WW][23] = 22,
+ [1][1][RTW89_WW][25] = 22,
+ [1][1][RTW89_WW][27] = 22,
+ [1][1][RTW89_WW][29] = 22,
+ [1][1][RTW89_WW][31] = 22,
+ [1][1][RTW89_WW][33] = 22,
+ [1][1][RTW89_WW][35] = 22,
+ [1][1][RTW89_WW][37] = 38,
+ [1][1][RTW89_WW][38] = 16,
+ [1][1][RTW89_WW][40] = 16,
+ [1][1][RTW89_WW][42] = 16,
+ [1][1][RTW89_WW][44] = 16,
+ [1][1][RTW89_WW][46] = 16,
+ [1][1][RTW89_WW][48] = 32,
+ [1][1][RTW89_WW][50] = 32,
+ [1][1][RTW89_WW][52] = 32,
+ [2][0][RTW89_WW][0] = 44,
+ [2][0][RTW89_WW][2] = 44,
+ [2][0][RTW89_WW][4] = 44,
+ [2][0][RTW89_WW][6] = 38,
+ [2][0][RTW89_WW][8] = 48,
+ [2][0][RTW89_WW][10] = 48,
+ [2][0][RTW89_WW][12] = 46,
+ [2][0][RTW89_WW][14] = 46,
+ [2][0][RTW89_WW][15] = 48,
+ [2][0][RTW89_WW][17] = 48,
+ [2][0][RTW89_WW][19] = 48,
+ [2][0][RTW89_WW][21] = 48,
+ [2][0][RTW89_WW][23] = 48,
+ [2][0][RTW89_WW][25] = 48,
+ [2][0][RTW89_WW][27] = 48,
+ [2][0][RTW89_WW][29] = 48,
+ [2][0][RTW89_WW][31] = 48,
+ [2][0][RTW89_WW][33] = 48,
+ [2][0][RTW89_WW][35] = 48,
+ [2][0][RTW89_WW][37] = 64,
+ [2][0][RTW89_WW][38] = 28,
+ [2][0][RTW89_WW][40] = 28,
+ [2][0][RTW89_WW][42] = 28,
+ [2][0][RTW89_WW][44] = 28,
+ [2][0][RTW89_WW][46] = 28,
+ [2][0][RTW89_WW][48] = 56,
+ [2][0][RTW89_WW][50] = 56,
+ [2][0][RTW89_WW][52] = 56,
+ [2][1][RTW89_WW][0] = 20,
+ [2][1][RTW89_WW][2] = 18,
+ [2][1][RTW89_WW][4] = 22,
+ [2][1][RTW89_WW][6] = 22,
+ [2][1][RTW89_WW][8] = 34,
+ [2][1][RTW89_WW][10] = 34,
+ [2][1][RTW89_WW][12] = 36,
+ [2][1][RTW89_WW][14] = 36,
+ [2][1][RTW89_WW][15] = 36,
+ [2][1][RTW89_WW][17] = 36,
+ [2][1][RTW89_WW][19] = 36,
+ [2][1][RTW89_WW][21] = 36,
+ [2][1][RTW89_WW][23] = 36,
+ [2][1][RTW89_WW][25] = 36,
+ [2][1][RTW89_WW][27] = 36,
+ [2][1][RTW89_WW][29] = 36,
+ [2][1][RTW89_WW][31] = 36,
+ [2][1][RTW89_WW][33] = 36,
+ [2][1][RTW89_WW][35] = 36,
+ [2][1][RTW89_WW][37] = 48,
+ [2][1][RTW89_WW][38] = 16,
+ [2][1][RTW89_WW][40] = 16,
+ [2][1][RTW89_WW][42] = 16,
+ [2][1][RTW89_WW][44] = 16,
+ [2][1][RTW89_WW][46] = 16,
+ [2][1][RTW89_WW][48] = 44,
+ [2][1][RTW89_WW][50] = 44,
+ [2][1][RTW89_WW][52] = 44,
+ [0][0][RTW89_FCC][0] = 52,
+ [0][0][RTW89_ETSI][0] = 24,
+ [0][0][RTW89_MKK][0] = 26,
+ [0][0][RTW89_IC][0] = 24,
+ [0][0][RTW89_KCC][0] = 44,
+ [0][0][RTW89_ACMA][0] = 24,
+ [0][0][RTW89_CHILE][0] = 40,
+ [0][0][RTW89_UKRAINE][0] = 24,
+ [0][0][RTW89_MEXICO][0] = 52,
+ [0][0][RTW89_CN][0] = 24,
+ [0][0][RTW89_QATAR][0] = 24,
+ [0][0][RTW89_UK][0] = 24,
+ [0][0][RTW89_FCC][2] = 52,
+ [0][0][RTW89_ETSI][2] = 24,
+ [0][0][RTW89_MKK][2] = 26,
+ [0][0][RTW89_IC][2] = 24,
+ [0][0][RTW89_KCC][2] = 44,
+ [0][0][RTW89_ACMA][2] = 24,
+ [0][0][RTW89_CHILE][2] = 38,
+ [0][0][RTW89_UKRAINE][2] = 24,
+ [0][0][RTW89_MEXICO][2] = 52,
+ [0][0][RTW89_CN][2] = 24,
+ [0][0][RTW89_QATAR][2] = 24,
+ [0][0][RTW89_UK][2] = 24,
+ [0][0][RTW89_FCC][4] = 52,
+ [0][0][RTW89_ETSI][4] = 24,
+ [0][0][RTW89_MKK][4] = 26,
+ [0][0][RTW89_IC][4] = 24,
+ [0][0][RTW89_KCC][4] = 44,
+ [0][0][RTW89_ACMA][4] = 24,
+ [0][0][RTW89_CHILE][4] = 38,
+ [0][0][RTW89_UKRAINE][4] = 24,
+ [0][0][RTW89_MEXICO][4] = 52,
+ [0][0][RTW89_CN][4] = 24,
+ [0][0][RTW89_QATAR][4] = 24,
+ [0][0][RTW89_UK][4] = 24,
+ [0][0][RTW89_FCC][6] = 52,
+ [0][0][RTW89_ETSI][6] = 24,
+ [0][0][RTW89_MKK][6] = 26,
+ [0][0][RTW89_IC][6] = 24,
+ [0][0][RTW89_KCC][6] = 12,
+ [0][0][RTW89_ACMA][6] = 24,
+ [0][0][RTW89_CHILE][6] = 40,
+ [0][0][RTW89_UKRAINE][6] = 24,
+ [0][0][RTW89_MEXICO][6] = 52,
+ [0][0][RTW89_CN][6] = 24,
+ [0][0][RTW89_QATAR][6] = 24,
+ [0][0][RTW89_UK][6] = 24,
+ [0][0][RTW89_FCC][8] = 52,
+ [0][0][RTW89_ETSI][8] = 24,
+ [0][0][RTW89_MKK][8] = 26,
+ [0][0][RTW89_IC][8] = 52,
+ [0][0][RTW89_KCC][8] = 46,
+ [0][0][RTW89_ACMA][8] = 24,
+ [0][0][RTW89_CHILE][8] = 64,
+ [0][0][RTW89_UKRAINE][8] = 24,
+ [0][0][RTW89_MEXICO][8] = 52,
+ [0][0][RTW89_CN][8] = 24,
+ [0][0][RTW89_QATAR][8] = 24,
+ [0][0][RTW89_UK][8] = 24,
+ [0][0][RTW89_FCC][10] = 52,
+ [0][0][RTW89_ETSI][10] = 24,
+ [0][0][RTW89_MKK][10] = 26,
+ [0][0][RTW89_IC][10] = 52,
+ [0][0][RTW89_KCC][10] = 46,
+ [0][0][RTW89_ACMA][10] = 24,
+ [0][0][RTW89_CHILE][10] = 64,
+ [0][0][RTW89_UKRAINE][10] = 24,
+ [0][0][RTW89_MEXICO][10] = 52,
+ [0][0][RTW89_CN][10] = 24,
+ [0][0][RTW89_QATAR][10] = 24,
+ [0][0][RTW89_UK][10] = 24,
+ [0][0][RTW89_FCC][12] = 52,
+ [0][0][RTW89_ETSI][12] = 24,
+ [0][0][RTW89_MKK][12] = 24,
+ [0][0][RTW89_IC][12] = 52,
+ [0][0][RTW89_KCC][12] = 42,
+ [0][0][RTW89_ACMA][12] = 24,
+ [0][0][RTW89_CHILE][12] = 64,
+ [0][0][RTW89_UKRAINE][12] = 24,
+ [0][0][RTW89_MEXICO][12] = 52,
+ [0][0][RTW89_CN][12] = 24,
+ [0][0][RTW89_QATAR][12] = 24,
+ [0][0][RTW89_UK][12] = 24,
+ [0][0][RTW89_FCC][14] = 52,
+ [0][0][RTW89_ETSI][14] = 24,
+ [0][0][RTW89_MKK][14] = 24,
+ [0][0][RTW89_IC][14] = 52,
+ [0][0][RTW89_KCC][14] = 42,
+ [0][0][RTW89_ACMA][14] = 24,
+ [0][0][RTW89_CHILE][14] = 64,
+ [0][0][RTW89_UKRAINE][14] = 24,
+ [0][0][RTW89_MEXICO][14] = 52,
+ [0][0][RTW89_CN][14] = 24,
+ [0][0][RTW89_QATAR][14] = 24,
+ [0][0][RTW89_UK][14] = 24,
+ [0][0][RTW89_FCC][15] = 52,
+ [0][0][RTW89_ETSI][15] = 24,
+ [0][0][RTW89_MKK][15] = 46,
+ [0][0][RTW89_IC][15] = 52,
+ [0][0][RTW89_KCC][15] = 44,
+ [0][0][RTW89_ACMA][15] = 24,
+ [0][0][RTW89_CHILE][15] = 60,
+ [0][0][RTW89_UKRAINE][15] = 24,
+ [0][0][RTW89_MEXICO][15] = 52,
+ [0][0][RTW89_CN][15] = 127,
+ [0][0][RTW89_QATAR][15] = 24,
+ [0][0][RTW89_UK][15] = 24,
+ [0][0][RTW89_FCC][17] = 52,
+ [0][0][RTW89_ETSI][17] = 24,
+ [0][0][RTW89_MKK][17] = 48,
+ [0][0][RTW89_IC][17] = 52,
+ [0][0][RTW89_KCC][17] = 44,
+ [0][0][RTW89_ACMA][17] = 24,
+ [0][0][RTW89_CHILE][17] = 60,
+ [0][0][RTW89_UKRAINE][17] = 24,
+ [0][0][RTW89_MEXICO][17] = 52,
+ [0][0][RTW89_CN][17] = 127,
+ [0][0][RTW89_QATAR][17] = 24,
+ [0][0][RTW89_UK][17] = 24,
+ [0][0][RTW89_FCC][19] = 52,
+ [0][0][RTW89_ETSI][19] = 24,
+ [0][0][RTW89_MKK][19] = 48,
+ [0][0][RTW89_IC][19] = 52,
+ [0][0][RTW89_KCC][19] = 44,
+ [0][0][RTW89_ACMA][19] = 24,
+ [0][0][RTW89_CHILE][19] = 60,
+ [0][0][RTW89_UKRAINE][19] = 24,
+ [0][0][RTW89_MEXICO][19] = 52,
+ [0][0][RTW89_CN][19] = 127,
+ [0][0][RTW89_QATAR][19] = 24,
+ [0][0][RTW89_UK][19] = 24,
+ [0][0][RTW89_FCC][21] = 52,
+ [0][0][RTW89_ETSI][21] = 24,
+ [0][0][RTW89_MKK][21] = 48,
+ [0][0][RTW89_IC][21] = 52,
+ [0][0][RTW89_KCC][21] = 44,
+ [0][0][RTW89_ACMA][21] = 24,
+ [0][0][RTW89_CHILE][21] = 62,
+ [0][0][RTW89_UKRAINE][21] = 24,
+ [0][0][RTW89_MEXICO][21] = 52,
+ [0][0][RTW89_CN][21] = 127,
+ [0][0][RTW89_QATAR][21] = 24,
+ [0][0][RTW89_UK][21] = 24,
+ [0][0][RTW89_FCC][23] = 52,
+ [0][0][RTW89_ETSI][23] = 24,
+ [0][0][RTW89_MKK][23] = 48,
+ [0][0][RTW89_IC][23] = 52,
+ [0][0][RTW89_KCC][23] = 44,
+ [0][0][RTW89_ACMA][23] = 24,
+ [0][0][RTW89_CHILE][23] = 62,
+ [0][0][RTW89_UKRAINE][23] = 24,
+ [0][0][RTW89_MEXICO][23] = 52,
+ [0][0][RTW89_CN][23] = 127,
+ [0][0][RTW89_QATAR][23] = 24,
+ [0][0][RTW89_UK][23] = 24,
+ [0][0][RTW89_FCC][25] = 52,
+ [0][0][RTW89_ETSI][25] = 24,
+ [0][0][RTW89_MKK][25] = 48,
+ [0][0][RTW89_IC][25] = 127,
+ [0][0][RTW89_KCC][25] = 44,
+ [0][0][RTW89_ACMA][25] = 127,
+ [0][0][RTW89_CHILE][25] = 62,
+ [0][0][RTW89_UKRAINE][25] = 24,
+ [0][0][RTW89_MEXICO][25] = 52,
+ [0][0][RTW89_CN][25] = 127,
+ [0][0][RTW89_QATAR][25] = 24,
+ [0][0][RTW89_UK][25] = 24,
+ [0][0][RTW89_FCC][27] = 52,
+ [0][0][RTW89_ETSI][27] = 24,
+ [0][0][RTW89_MKK][27] = 48,
+ [0][0][RTW89_IC][27] = 127,
+ [0][0][RTW89_KCC][27] = 44,
+ [0][0][RTW89_ACMA][27] = 127,
+ [0][0][RTW89_CHILE][27] = 62,
+ [0][0][RTW89_UKRAINE][27] = 24,
+ [0][0][RTW89_MEXICO][27] = 52,
+ [0][0][RTW89_CN][27] = 127,
+ [0][0][RTW89_QATAR][27] = 24,
+ [0][0][RTW89_UK][27] = 24,
+ [0][0][RTW89_FCC][29] = 52,
+ [0][0][RTW89_ETSI][29] = 24,
+ [0][0][RTW89_MKK][29] = 48,
+ [0][0][RTW89_IC][29] = 127,
+ [0][0][RTW89_KCC][29] = 44,
+ [0][0][RTW89_ACMA][29] = 127,
+ [0][0][RTW89_CHILE][29] = 60,
+ [0][0][RTW89_UKRAINE][29] = 24,
+ [0][0][RTW89_MEXICO][29] = 52,
+ [0][0][RTW89_CN][29] = 127,
+ [0][0][RTW89_QATAR][29] = 24,
+ [0][0][RTW89_UK][29] = 24,
+ [0][0][RTW89_FCC][31] = 52,
+ [0][0][RTW89_ETSI][31] = 24,
+ [0][0][RTW89_MKK][31] = 48,
+ [0][0][RTW89_IC][31] = 52,
+ [0][0][RTW89_KCC][31] = 44,
+ [0][0][RTW89_ACMA][31] = 24,
+ [0][0][RTW89_CHILE][31] = 60,
+ [0][0][RTW89_UKRAINE][31] = 24,
+ [0][0][RTW89_MEXICO][31] = 52,
+ [0][0][RTW89_CN][31] = 127,
+ [0][0][RTW89_QATAR][31] = 24,
+ [0][0][RTW89_UK][31] = 24,
+ [0][0][RTW89_FCC][33] = 52,
+ [0][0][RTW89_ETSI][33] = 24,
+ [0][0][RTW89_MKK][33] = 48,
+ [0][0][RTW89_IC][33] = 52,
+ [0][0][RTW89_KCC][33] = 44,
+ [0][0][RTW89_ACMA][33] = 24,
+ [0][0][RTW89_CHILE][33] = 60,
+ [0][0][RTW89_UKRAINE][33] = 24,
+ [0][0][RTW89_MEXICO][33] = 52,
+ [0][0][RTW89_CN][33] = 127,
+ [0][0][RTW89_QATAR][33] = 24,
+ [0][0][RTW89_UK][33] = 24,
+ [0][0][RTW89_FCC][35] = 52,
+ [0][0][RTW89_ETSI][35] = 24,
+ [0][0][RTW89_MKK][35] = 48,
+ [0][0][RTW89_IC][35] = 52,
+ [0][0][RTW89_KCC][35] = 44,
+ [0][0][RTW89_ACMA][35] = 24,
+ [0][0][RTW89_CHILE][35] = 60,
+ [0][0][RTW89_UKRAINE][35] = 24,
+ [0][0][RTW89_MEXICO][35] = 52,
+ [0][0][RTW89_CN][35] = 127,
+ [0][0][RTW89_QATAR][35] = 24,
+ [0][0][RTW89_UK][35] = 24,
+ [0][0][RTW89_FCC][37] = 52,
+ [0][0][RTW89_ETSI][37] = 127,
+ [0][0][RTW89_MKK][37] = 44,
+ [0][0][RTW89_IC][37] = 52,
+ [0][0][RTW89_KCC][37] = 44,
+ [0][0][RTW89_ACMA][37] = 52,
+ [0][0][RTW89_CHILE][37] = 62,
+ [0][0][RTW89_UKRAINE][37] = 127,
+ [0][0][RTW89_MEXICO][37] = 52,
+ [0][0][RTW89_CN][37] = 127,
+ [0][0][RTW89_QATAR][37] = 127,
+ [0][0][RTW89_UK][37] = 56,
+ [0][0][RTW89_FCC][38] = 84,
+ [0][0][RTW89_ETSI][38] = 28,
+ [0][0][RTW89_MKK][38] = 127,
+ [0][0][RTW89_IC][38] = 84,
+ [0][0][RTW89_KCC][38] = 44,
+ [0][0][RTW89_ACMA][38] = 84,
+ [0][0][RTW89_CHILE][38] = 60,
+ [0][0][RTW89_UKRAINE][38] = 28,
+ [0][0][RTW89_MEXICO][38] = 84,
+ [0][0][RTW89_CN][38] = 62,
+ [0][0][RTW89_QATAR][38] = 28,
+ [0][0][RTW89_UK][38] = 26,
+ [0][0][RTW89_FCC][40] = 84,
+ [0][0][RTW89_ETSI][40] = 28,
+ [0][0][RTW89_MKK][40] = 127,
+ [0][0][RTW89_IC][40] = 84,
+ [0][0][RTW89_KCC][40] = 44,
+ [0][0][RTW89_ACMA][40] = 84,
+ [0][0][RTW89_CHILE][40] = 60,
+ [0][0][RTW89_UKRAINE][40] = 28,
+ [0][0][RTW89_MEXICO][40] = 84,
+ [0][0][RTW89_CN][40] = 62,
+ [0][0][RTW89_QATAR][40] = 28,
+ [0][0][RTW89_UK][40] = 26,
+ [0][0][RTW89_FCC][42] = 84,
+ [0][0][RTW89_ETSI][42] = 28,
+ [0][0][RTW89_MKK][42] = 127,
+ [0][0][RTW89_IC][42] = 84,
+ [0][0][RTW89_KCC][42] = 44,
+ [0][0][RTW89_ACMA][42] = 84,
+ [0][0][RTW89_CHILE][42] = 64,
+ [0][0][RTW89_UKRAINE][42] = 28,
+ [0][0][RTW89_MEXICO][42] = 84,
+ [0][0][RTW89_CN][42] = 62,
+ [0][0][RTW89_QATAR][42] = 28,
+ [0][0][RTW89_UK][42] = 26,
+ [0][0][RTW89_FCC][44] = 84,
+ [0][0][RTW89_ETSI][44] = 28,
+ [0][0][RTW89_MKK][44] = 127,
+ [0][0][RTW89_IC][44] = 84,
+ [0][0][RTW89_KCC][44] = 44,
+ [0][0][RTW89_ACMA][44] = 84,
+ [0][0][RTW89_CHILE][44] = 60,
+ [0][0][RTW89_UKRAINE][44] = 28,
+ [0][0][RTW89_MEXICO][44] = 84,
+ [0][0][RTW89_CN][44] = 62,
+ [0][0][RTW89_QATAR][44] = 28,
+ [0][0][RTW89_UK][44] = 26,
+ [0][0][RTW89_FCC][46] = 84,
+ [0][0][RTW89_ETSI][46] = 28,
+ [0][0][RTW89_MKK][46] = 127,
+ [0][0][RTW89_IC][46] = 84,
+ [0][0][RTW89_KCC][46] = 44,
+ [0][0][RTW89_ACMA][46] = 84,
+ [0][0][RTW89_CHILE][46] = 60,
+ [0][0][RTW89_UKRAINE][46] = 28,
+ [0][0][RTW89_MEXICO][46] = 84,
+ [0][0][RTW89_CN][46] = 62,
+ [0][0][RTW89_QATAR][46] = 28,
+ [0][0][RTW89_UK][46] = 26,
+ [0][0][RTW89_FCC][48] = 32,
+ [0][0][RTW89_ETSI][48] = 127,
+ [0][0][RTW89_MKK][48] = 127,
+ [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_KCC][48] = 127,
+ [0][0][RTW89_ACMA][48] = 127,
+ [0][0][RTW89_CHILE][48] = 127,
+ [0][0][RTW89_UKRAINE][48] = 127,
+ [0][0][RTW89_MEXICO][48] = 127,
+ [0][0][RTW89_CN][48] = 127,
+ [0][0][RTW89_QATAR][48] = 127,
+ [0][0][RTW89_UK][48] = 127,
+ [0][0][RTW89_FCC][50] = 32,
+ [0][0][RTW89_ETSI][50] = 127,
+ [0][0][RTW89_MKK][50] = 127,
+ [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_KCC][50] = 127,
+ [0][0][RTW89_ACMA][50] = 127,
+ [0][0][RTW89_CHILE][50] = 127,
+ [0][0][RTW89_UKRAINE][50] = 127,
+ [0][0][RTW89_MEXICO][50] = 127,
+ [0][0][RTW89_CN][50] = 127,
+ [0][0][RTW89_QATAR][50] = 127,
+ [0][0][RTW89_UK][50] = 127,
+ [0][0][RTW89_FCC][52] = 32,
+ [0][0][RTW89_ETSI][52] = 127,
+ [0][0][RTW89_MKK][52] = 127,
+ [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_KCC][52] = 127,
+ [0][0][RTW89_ACMA][52] = 127,
+ [0][0][RTW89_CHILE][52] = 127,
+ [0][0][RTW89_UKRAINE][52] = 127,
+ [0][0][RTW89_MEXICO][52] = 127,
+ [0][0][RTW89_CN][52] = 127,
+ [0][0][RTW89_QATAR][52] = 127,
+ [0][0][RTW89_UK][52] = 127,
+ [0][1][RTW89_FCC][0] = 34,
+ [0][1][RTW89_ETSI][0] = 12,
+ [0][1][RTW89_MKK][0] = 12,
+ [0][1][RTW89_IC][0] = 0,
+ [0][1][RTW89_KCC][0] = 28,
+ [0][1][RTW89_ACMA][0] = 12,
+ [0][1][RTW89_CHILE][0] = 14,
+ [0][1][RTW89_UKRAINE][0] = 12,
+ [0][1][RTW89_MEXICO][0] = 34,
+ [0][1][RTW89_CN][0] = 12,
+ [0][1][RTW89_QATAR][0] = 12,
+ [0][1][RTW89_UK][0] = 12,
+ [0][1][RTW89_FCC][2] = 38,
+ [0][1][RTW89_ETSI][2] = 12,
+ [0][1][RTW89_MKK][2] = 12,
+ [0][1][RTW89_IC][2] = 4,
+ [0][1][RTW89_KCC][2] = 28,
+ [0][1][RTW89_ACMA][2] = 12,
+ [0][1][RTW89_CHILE][2] = 12,
+ [0][1][RTW89_UKRAINE][2] = 12,
+ [0][1][RTW89_MEXICO][2] = 38,
+ [0][1][RTW89_CN][2] = 12,
+ [0][1][RTW89_QATAR][2] = 12,
+ [0][1][RTW89_UK][2] = 12,
+ [0][1][RTW89_FCC][4] = 34,
+ [0][1][RTW89_ETSI][4] = 12,
+ [0][1][RTW89_MKK][4] = 14,
+ [0][1][RTW89_IC][4] = 0,
+ [0][1][RTW89_KCC][4] = 28,
+ [0][1][RTW89_ACMA][4] = 12,
+ [0][1][RTW89_CHILE][4] = 12,
+ [0][1][RTW89_UKRAINE][4] = 12,
+ [0][1][RTW89_MEXICO][4] = 34,
+ [0][1][RTW89_CN][4] = 12,
+ [0][1][RTW89_QATAR][4] = 12,
+ [0][1][RTW89_UK][4] = 12,
+ [0][1][RTW89_FCC][6] = 34,
+ [0][1][RTW89_ETSI][6] = 12,
+ [0][1][RTW89_MKK][6] = 14,
+ [0][1][RTW89_IC][6] = 0,
+ [0][1][RTW89_KCC][6] = 2,
+ [0][1][RTW89_ACMA][6] = 12,
+ [0][1][RTW89_CHILE][6] = 12,
+ [0][1][RTW89_UKRAINE][6] = 12,
+ [0][1][RTW89_MEXICO][6] = 34,
+ [0][1][RTW89_CN][6] = 12,
+ [0][1][RTW89_QATAR][6] = 12,
+ [0][1][RTW89_UK][6] = 12,
+ [0][1][RTW89_FCC][8] = 34,
+ [0][1][RTW89_ETSI][8] = 12,
+ [0][1][RTW89_MKK][8] = 14,
+ [0][1][RTW89_IC][8] = 34,
+ [0][1][RTW89_KCC][8] = 30,
+ [0][1][RTW89_ACMA][8] = 12,
+ [0][1][RTW89_CHILE][8] = 50,
+ [0][1][RTW89_UKRAINE][8] = 12,
+ [0][1][RTW89_MEXICO][8] = 34,
+ [0][1][RTW89_CN][8] = 12,
+ [0][1][RTW89_QATAR][8] = 12,
+ [0][1][RTW89_UK][8] = 12,
+ [0][1][RTW89_FCC][10] = 34,
+ [0][1][RTW89_ETSI][10] = 12,
+ [0][1][RTW89_MKK][10] = 14,
+ [0][1][RTW89_IC][10] = 34,
+ [0][1][RTW89_KCC][10] = 30,
+ [0][1][RTW89_ACMA][10] = 12,
+ [0][1][RTW89_CHILE][10] = 50,
+ [0][1][RTW89_UKRAINE][10] = 12,
+ [0][1][RTW89_MEXICO][10] = 34,
+ [0][1][RTW89_CN][10] = 12,
+ [0][1][RTW89_QATAR][10] = 12,
+ [0][1][RTW89_UK][10] = 12,
+ [0][1][RTW89_FCC][12] = 38,
+ [0][1][RTW89_ETSI][12] = 12,
+ [0][1][RTW89_MKK][12] = 12,
+ [0][1][RTW89_IC][12] = 38,
+ [0][1][RTW89_KCC][12] = 30,
+ [0][1][RTW89_ACMA][12] = 12,
+ [0][1][RTW89_CHILE][12] = 50,
+ [0][1][RTW89_UKRAINE][12] = 12,
+ [0][1][RTW89_MEXICO][12] = 38,
+ [0][1][RTW89_CN][12] = 12,
+ [0][1][RTW89_QATAR][12] = 12,
+ [0][1][RTW89_UK][12] = 12,
+ [0][1][RTW89_FCC][14] = 34,
+ [0][1][RTW89_ETSI][14] = 12,
+ [0][1][RTW89_MKK][14] = 12,
+ [0][1][RTW89_IC][14] = 34,
+ [0][1][RTW89_KCC][14] = 30,
+ [0][1][RTW89_ACMA][14] = 12,
+ [0][1][RTW89_CHILE][14] = 48,
+ [0][1][RTW89_UKRAINE][14] = 12,
+ [0][1][RTW89_MEXICO][14] = 34,
+ [0][1][RTW89_CN][14] = 12,
+ [0][1][RTW89_QATAR][14] = 12,
+ [0][1][RTW89_UK][14] = 12,
+ [0][1][RTW89_FCC][15] = 34,
+ [0][1][RTW89_ETSI][15] = 12,
+ [0][1][RTW89_MKK][15] = 32,
+ [0][1][RTW89_IC][15] = 34,
+ [0][1][RTW89_KCC][15] = 30,
+ [0][1][RTW89_ACMA][15] = 12,
+ [0][1][RTW89_CHILE][15] = 52,
+ [0][1][RTW89_UKRAINE][15] = 12,
+ [0][1][RTW89_MEXICO][15] = 34,
+ [0][1][RTW89_CN][15] = 127,
+ [0][1][RTW89_QATAR][15] = 12,
+ [0][1][RTW89_UK][15] = 12,
+ [0][1][RTW89_FCC][17] = 34,
+ [0][1][RTW89_ETSI][17] = 12,
+ [0][1][RTW89_MKK][17] = 34,
+ [0][1][RTW89_IC][17] = 34,
+ [0][1][RTW89_KCC][17] = 30,
+ [0][1][RTW89_ACMA][17] = 12,
+ [0][1][RTW89_CHILE][17] = 52,
+ [0][1][RTW89_UKRAINE][17] = 12,
+ [0][1][RTW89_MEXICO][17] = 34,
+ [0][1][RTW89_CN][17] = 127,
+ [0][1][RTW89_QATAR][17] = 12,
+ [0][1][RTW89_UK][17] = 12,
+ [0][1][RTW89_FCC][19] = 38,
+ [0][1][RTW89_ETSI][19] = 12,
+ [0][1][RTW89_MKK][19] = 34,
+ [0][1][RTW89_IC][19] = 38,
+ [0][1][RTW89_KCC][19] = 30,
+ [0][1][RTW89_ACMA][19] = 12,
+ [0][1][RTW89_CHILE][19] = 52,
+ [0][1][RTW89_UKRAINE][19] = 12,
+ [0][1][RTW89_MEXICO][19] = 38,
+ [0][1][RTW89_CN][19] = 127,
+ [0][1][RTW89_QATAR][19] = 12,
+ [0][1][RTW89_UK][19] = 12,
+ [0][1][RTW89_FCC][21] = 38,
+ [0][1][RTW89_ETSI][21] = 12,
+ [0][1][RTW89_MKK][21] = 34,
+ [0][1][RTW89_IC][21] = 38,
+ [0][1][RTW89_KCC][21] = 30,
+ [0][1][RTW89_ACMA][21] = 12,
+ [0][1][RTW89_CHILE][21] = 52,
+ [0][1][RTW89_UKRAINE][21] = 12,
+ [0][1][RTW89_MEXICO][21] = 38,
+ [0][1][RTW89_CN][21] = 127,
+ [0][1][RTW89_QATAR][21] = 12,
+ [0][1][RTW89_UK][21] = 12,
+ [0][1][RTW89_FCC][23] = 38,
+ [0][1][RTW89_ETSI][23] = 12,
+ [0][1][RTW89_MKK][23] = 34,
+ [0][1][RTW89_IC][23] = 38,
+ [0][1][RTW89_KCC][23] = 30,
+ [0][1][RTW89_ACMA][23] = 12,
+ [0][1][RTW89_CHILE][23] = 52,
+ [0][1][RTW89_UKRAINE][23] = 12,
+ [0][1][RTW89_MEXICO][23] = 38,
+ [0][1][RTW89_CN][23] = 127,
+ [0][1][RTW89_QATAR][23] = 12,
+ [0][1][RTW89_UK][23] = 12,
+ [0][1][RTW89_FCC][25] = 38,
+ [0][1][RTW89_ETSI][25] = 12,
+ [0][1][RTW89_MKK][25] = 34,
+ [0][1][RTW89_IC][25] = 127,
+ [0][1][RTW89_KCC][25] = 30,
+ [0][1][RTW89_ACMA][25] = 127,
+ [0][1][RTW89_CHILE][25] = 52,
+ [0][1][RTW89_UKRAINE][25] = 12,
+ [0][1][RTW89_MEXICO][25] = 38,
+ [0][1][RTW89_CN][25] = 127,
+ [0][1][RTW89_QATAR][25] = 12,
+ [0][1][RTW89_UK][25] = 12,
+ [0][1][RTW89_FCC][27] = 38,
+ [0][1][RTW89_ETSI][27] = 12,
+ [0][1][RTW89_MKK][27] = 34,
+ [0][1][RTW89_IC][27] = 127,
+ [0][1][RTW89_KCC][27] = 30,
+ [0][1][RTW89_ACMA][27] = 127,
+ [0][1][RTW89_CHILE][27] = 52,
+ [0][1][RTW89_UKRAINE][27] = 12,
+ [0][1][RTW89_MEXICO][27] = 38,
+ [0][1][RTW89_CN][27] = 127,
+ [0][1][RTW89_QATAR][27] = 12,
+ [0][1][RTW89_UK][27] = 12,
+ [0][1][RTW89_FCC][29] = 38,
+ [0][1][RTW89_ETSI][29] = 12,
+ [0][1][RTW89_MKK][29] = 34,
+ [0][1][RTW89_IC][29] = 127,
+ [0][1][RTW89_KCC][29] = 30,
+ [0][1][RTW89_ACMA][29] = 127,
+ [0][1][RTW89_CHILE][29] = 52,
+ [0][1][RTW89_UKRAINE][29] = 12,
+ [0][1][RTW89_MEXICO][29] = 38,
+ [0][1][RTW89_CN][29] = 127,
+ [0][1][RTW89_QATAR][29] = 12,
+ [0][1][RTW89_UK][29] = 12,
+ [0][1][RTW89_FCC][31] = 38,
+ [0][1][RTW89_ETSI][31] = 12,
+ [0][1][RTW89_MKK][31] = 34,
+ [0][1][RTW89_IC][31] = 34,
+ [0][1][RTW89_KCC][31] = 30,
+ [0][1][RTW89_ACMA][31] = 12,
+ [0][1][RTW89_CHILE][31] = 52,
+ [0][1][RTW89_UKRAINE][31] = 12,
+ [0][1][RTW89_MEXICO][31] = 38,
+ [0][1][RTW89_CN][31] = 127,
+ [0][1][RTW89_QATAR][31] = 12,
+ [0][1][RTW89_UK][31] = 12,
+ [0][1][RTW89_FCC][33] = 34,
+ [0][1][RTW89_ETSI][33] = 12,
+ [0][1][RTW89_MKK][33] = 34,
+ [0][1][RTW89_IC][33] = 34,
+ [0][1][RTW89_KCC][33] = 30,
+ [0][1][RTW89_ACMA][33] = 12,
+ [0][1][RTW89_CHILE][33] = 52,
+ [0][1][RTW89_UKRAINE][33] = 12,
+ [0][1][RTW89_MEXICO][33] = 34,
+ [0][1][RTW89_CN][33] = 127,
+ [0][1][RTW89_QATAR][33] = 12,
+ [0][1][RTW89_UK][33] = 12,
+ [0][1][RTW89_FCC][35] = 34,
+ [0][1][RTW89_ETSI][35] = 12,
+ [0][1][RTW89_MKK][35] = 34,
+ [0][1][RTW89_IC][35] = 34,
+ [0][1][RTW89_KCC][35] = 30,
+ [0][1][RTW89_ACMA][35] = 12,
+ [0][1][RTW89_CHILE][35] = 52,
+ [0][1][RTW89_UKRAINE][35] = 12,
+ [0][1][RTW89_MEXICO][35] = 34,
+ [0][1][RTW89_CN][35] = 127,
+ [0][1][RTW89_QATAR][35] = 12,
+ [0][1][RTW89_UK][35] = 12,
+ [0][1][RTW89_FCC][37] = 38,
+ [0][1][RTW89_ETSI][37] = 127,
+ [0][1][RTW89_MKK][37] = 34,
+ [0][1][RTW89_IC][37] = 38,
+ [0][1][RTW89_KCC][37] = 30,
+ [0][1][RTW89_ACMA][37] = 38,
+ [0][1][RTW89_CHILE][37] = 52,
+ [0][1][RTW89_UKRAINE][37] = 127,
+ [0][1][RTW89_MEXICO][37] = 38,
+ [0][1][RTW89_CN][37] = 127,
+ [0][1][RTW89_QATAR][37] = 127,
+ [0][1][RTW89_UK][37] = 44,
+ [0][1][RTW89_FCC][38] = 82,
+ [0][1][RTW89_ETSI][38] = 16,
+ [0][1][RTW89_MKK][38] = 127,
+ [0][1][RTW89_IC][38] = 82,
+ [0][1][RTW89_KCC][38] = 30,
+ [0][1][RTW89_ACMA][38] = 84,
+ [0][1][RTW89_CHILE][38] = 52,
+ [0][1][RTW89_UKRAINE][38] = 16,
+ [0][1][RTW89_MEXICO][38] = 82,
+ [0][1][RTW89_CN][38] = 50,
+ [0][1][RTW89_QATAR][38] = 16,
+ [0][1][RTW89_UK][38] = 14,
+ [0][1][RTW89_FCC][40] = 82,
+ [0][1][RTW89_ETSI][40] = 16,
+ [0][1][RTW89_MKK][40] = 127,
+ [0][1][RTW89_IC][40] = 82,
+ [0][1][RTW89_KCC][40] = 30,
+ [0][1][RTW89_ACMA][40] = 84,
+ [0][1][RTW89_CHILE][40] = 52,
+ [0][1][RTW89_UKRAINE][40] = 16,
+ [0][1][RTW89_MEXICO][40] = 82,
+ [0][1][RTW89_CN][40] = 50,
+ [0][1][RTW89_QATAR][40] = 16,
+ [0][1][RTW89_UK][40] = 14,
+ [0][1][RTW89_FCC][42] = 82,
+ [0][1][RTW89_ETSI][42] = 16,
+ [0][1][RTW89_MKK][42] = 127,
+ [0][1][RTW89_IC][42] = 82,
+ [0][1][RTW89_KCC][42] = 30,
+ [0][1][RTW89_ACMA][42] = 84,
+ [0][1][RTW89_CHILE][42] = 54,
+ [0][1][RTW89_UKRAINE][42] = 16,
+ [0][1][RTW89_MEXICO][42] = 82,
+ [0][1][RTW89_CN][42] = 50,
+ [0][1][RTW89_QATAR][42] = 16,
+ [0][1][RTW89_UK][42] = 14,
+ [0][1][RTW89_FCC][44] = 82,
+ [0][1][RTW89_ETSI][44] = 16,
+ [0][1][RTW89_MKK][44] = 127,
+ [0][1][RTW89_IC][44] = 82,
+ [0][1][RTW89_KCC][44] = 30,
+ [0][1][RTW89_ACMA][44] = 84,
+ [0][1][RTW89_CHILE][44] = 54,
+ [0][1][RTW89_UKRAINE][44] = 16,
+ [0][1][RTW89_MEXICO][44] = 82,
+ [0][1][RTW89_CN][44] = 50,
+ [0][1][RTW89_QATAR][44] = 16,
+ [0][1][RTW89_UK][44] = 14,
+ [0][1][RTW89_FCC][46] = 82,
+ [0][1][RTW89_ETSI][46] = 16,
+ [0][1][RTW89_MKK][46] = 127,
+ [0][1][RTW89_IC][46] = 82,
+ [0][1][RTW89_KCC][46] = 30,
+ [0][1][RTW89_ACMA][46] = 84,
+ [0][1][RTW89_CHILE][46] = 54,
+ [0][1][RTW89_UKRAINE][46] = 16,
+ [0][1][RTW89_MEXICO][46] = 82,
+ [0][1][RTW89_CN][46] = 50,
+ [0][1][RTW89_QATAR][46] = 16,
+ [0][1][RTW89_UK][46] = 14,
+ [0][1][RTW89_FCC][48] = 20,
+ [0][1][RTW89_ETSI][48] = 127,
+ [0][1][RTW89_MKK][48] = 127,
+ [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_KCC][48] = 127,
+ [0][1][RTW89_ACMA][48] = 127,
+ [0][1][RTW89_CHILE][48] = 127,
+ [0][1][RTW89_UKRAINE][48] = 127,
+ [0][1][RTW89_MEXICO][48] = 127,
+ [0][1][RTW89_CN][48] = 127,
+ [0][1][RTW89_QATAR][48] = 127,
+ [0][1][RTW89_UK][48] = 127,
+ [0][1][RTW89_FCC][50] = 20,
+ [0][1][RTW89_ETSI][50] = 127,
+ [0][1][RTW89_MKK][50] = 127,
+ [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_KCC][50] = 127,
+ [0][1][RTW89_ACMA][50] = 127,
+ [0][1][RTW89_CHILE][50] = 127,
+ [0][1][RTW89_UKRAINE][50] = 127,
+ [0][1][RTW89_MEXICO][50] = 127,
+ [0][1][RTW89_CN][50] = 127,
+ [0][1][RTW89_QATAR][50] = 127,
+ [0][1][RTW89_UK][50] = 127,
+ [0][1][RTW89_FCC][52] = 20,
+ [0][1][RTW89_ETSI][52] = 127,
+ [0][1][RTW89_MKK][52] = 127,
+ [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_KCC][52] = 127,
+ [0][1][RTW89_ACMA][52] = 127,
+ [0][1][RTW89_CHILE][52] = 127,
+ [0][1][RTW89_UKRAINE][52] = 127,
+ [0][1][RTW89_MEXICO][52] = 127,
+ [0][1][RTW89_CN][52] = 127,
+ [0][1][RTW89_QATAR][52] = 127,
+ [0][1][RTW89_UK][52] = 127,
+ [1][0][RTW89_FCC][0] = 62,
+ [1][0][RTW89_ETSI][0] = 34,
+ [1][0][RTW89_MKK][0] = 36,
+ [1][0][RTW89_IC][0] = 36,
+ [1][0][RTW89_KCC][0] = 52,
+ [1][0][RTW89_ACMA][0] = 34,
+ [1][0][RTW89_CHILE][0] = 40,
+ [1][0][RTW89_UKRAINE][0] = 34,
+ [1][0][RTW89_MEXICO][0] = 62,
+ [1][0][RTW89_CN][0] = 34,
+ [1][0][RTW89_QATAR][0] = 34,
+ [1][0][RTW89_UK][0] = 34,
+ [1][0][RTW89_FCC][2] = 62,
+ [1][0][RTW89_ETSI][2] = 34,
+ [1][0][RTW89_MKK][2] = 36,
+ [1][0][RTW89_IC][2] = 36,
+ [1][0][RTW89_KCC][2] = 52,
+ [1][0][RTW89_ACMA][2] = 34,
+ [1][0][RTW89_CHILE][2] = 42,
+ [1][0][RTW89_UKRAINE][2] = 34,
+ [1][0][RTW89_MEXICO][2] = 62,
+ [1][0][RTW89_CN][2] = 34,
+ [1][0][RTW89_QATAR][2] = 34,
+ [1][0][RTW89_UK][2] = 34,
+ [1][0][RTW89_FCC][4] = 62,
+ [1][0][RTW89_ETSI][4] = 34,
+ [1][0][RTW89_MKK][4] = 34,
+ [1][0][RTW89_IC][4] = 36,
+ [1][0][RTW89_KCC][4] = 52,
+ [1][0][RTW89_ACMA][4] = 34,
+ [1][0][RTW89_CHILE][4] = 42,
+ [1][0][RTW89_UKRAINE][4] = 34,
+ [1][0][RTW89_MEXICO][4] = 62,
+ [1][0][RTW89_CN][4] = 34,
+ [1][0][RTW89_QATAR][4] = 34,
+ [1][0][RTW89_UK][4] = 34,
+ [1][0][RTW89_FCC][6] = 62,
+ [1][0][RTW89_ETSI][6] = 34,
+ [1][0][RTW89_MKK][6] = 34,
+ [1][0][RTW89_IC][6] = 36,
+ [1][0][RTW89_KCC][6] = 26,
+ [1][0][RTW89_ACMA][6] = 34,
+ [1][0][RTW89_CHILE][6] = 42,
+ [1][0][RTW89_UKRAINE][6] = 34,
+ [1][0][RTW89_MEXICO][6] = 62,
+ [1][0][RTW89_CN][6] = 34,
+ [1][0][RTW89_QATAR][6] = 34,
+ [1][0][RTW89_UK][6] = 34,
+ [1][0][RTW89_FCC][8] = 62,
+ [1][0][RTW89_ETSI][8] = 34,
+ [1][0][RTW89_MKK][8] = 36,
+ [1][0][RTW89_IC][8] = 62,
+ [1][0][RTW89_KCC][8] = 54,
+ [1][0][RTW89_ACMA][8] = 34,
+ [1][0][RTW89_CHILE][8] = 64,
+ [1][0][RTW89_UKRAINE][8] = 34,
+ [1][0][RTW89_MEXICO][8] = 62,
+ [1][0][RTW89_CN][8] = 34,
+ [1][0][RTW89_QATAR][8] = 34,
+ [1][0][RTW89_UK][8] = 34,
+ [1][0][RTW89_FCC][10] = 62,
+ [1][0][RTW89_ETSI][10] = 34,
+ [1][0][RTW89_MKK][10] = 36,
+ [1][0][RTW89_IC][10] = 62,
+ [1][0][RTW89_KCC][10] = 54,
+ [1][0][RTW89_ACMA][10] = 34,
+ [1][0][RTW89_CHILE][10] = 64,
+ [1][0][RTW89_UKRAINE][10] = 34,
+ [1][0][RTW89_MEXICO][10] = 62,
+ [1][0][RTW89_CN][10] = 34,
+ [1][0][RTW89_QATAR][10] = 34,
+ [1][0][RTW89_UK][10] = 34,
+ [1][0][RTW89_FCC][12] = 64,
+ [1][0][RTW89_ETSI][12] = 34,
+ [1][0][RTW89_MKK][12] = 36,
+ [1][0][RTW89_IC][12] = 64,
+ [1][0][RTW89_KCC][12] = 54,
+ [1][0][RTW89_ACMA][12] = 34,
+ [1][0][RTW89_CHILE][12] = 64,
+ [1][0][RTW89_UKRAINE][12] = 34,
+ [1][0][RTW89_MEXICO][12] = 64,
+ [1][0][RTW89_CN][12] = 34,
+ [1][0][RTW89_QATAR][12] = 34,
+ [1][0][RTW89_UK][12] = 34,
+ [1][0][RTW89_FCC][14] = 62,
+ [1][0][RTW89_ETSI][14] = 34,
+ [1][0][RTW89_MKK][14] = 36,
+ [1][0][RTW89_IC][14] = 62,
+ [1][0][RTW89_KCC][14] = 54,
+ [1][0][RTW89_ACMA][14] = 34,
+ [1][0][RTW89_CHILE][14] = 64,
+ [1][0][RTW89_UKRAINE][14] = 34,
+ [1][0][RTW89_MEXICO][14] = 62,
+ [1][0][RTW89_CN][14] = 34,
+ [1][0][RTW89_QATAR][14] = 34,
+ [1][0][RTW89_UK][14] = 34,
+ [1][0][RTW89_FCC][15] = 62,
+ [1][0][RTW89_ETSI][15] = 34,
+ [1][0][RTW89_MKK][15] = 54,
+ [1][0][RTW89_IC][15] = 62,
+ [1][0][RTW89_KCC][15] = 54,
+ [1][0][RTW89_ACMA][15] = 34,
+ [1][0][RTW89_CHILE][15] = 62,
+ [1][0][RTW89_UKRAINE][15] = 34,
+ [1][0][RTW89_MEXICO][15] = 62,
+ [1][0][RTW89_CN][15] = 127,
+ [1][0][RTW89_QATAR][15] = 34,
+ [1][0][RTW89_UK][15] = 34,
+ [1][0][RTW89_FCC][17] = 62,
+ [1][0][RTW89_ETSI][17] = 34,
+ [1][0][RTW89_MKK][17] = 58,
+ [1][0][RTW89_IC][17] = 62,
+ [1][0][RTW89_KCC][17] = 54,
+ [1][0][RTW89_ACMA][17] = 34,
+ [1][0][RTW89_CHILE][17] = 62,
+ [1][0][RTW89_UKRAINE][17] = 34,
+ [1][0][RTW89_MEXICO][17] = 62,
+ [1][0][RTW89_CN][17] = 127,
+ [1][0][RTW89_QATAR][17] = 34,
+ [1][0][RTW89_UK][17] = 34,
+ [1][0][RTW89_FCC][19] = 62,
+ [1][0][RTW89_ETSI][19] = 34,
+ [1][0][RTW89_MKK][19] = 58,
+ [1][0][RTW89_IC][19] = 62,
+ [1][0][RTW89_KCC][19] = 54,
+ [1][0][RTW89_ACMA][19] = 34,
+ [1][0][RTW89_CHILE][19] = 62,
+ [1][0][RTW89_UKRAINE][19] = 34,
+ [1][0][RTW89_MEXICO][19] = 62,
+ [1][0][RTW89_CN][19] = 127,
+ [1][0][RTW89_QATAR][19] = 34,
+ [1][0][RTW89_UK][19] = 34,
+ [1][0][RTW89_FCC][21] = 62,
+ [1][0][RTW89_ETSI][21] = 34,
+ [1][0][RTW89_MKK][21] = 58,
+ [1][0][RTW89_IC][21] = 62,
+ [1][0][RTW89_KCC][21] = 54,
+ [1][0][RTW89_ACMA][21] = 34,
+ [1][0][RTW89_CHILE][21] = 64,
+ [1][0][RTW89_UKRAINE][21] = 34,
+ [1][0][RTW89_MEXICO][21] = 62,
+ [1][0][RTW89_CN][21] = 127,
+ [1][0][RTW89_QATAR][21] = 34,
+ [1][0][RTW89_UK][21] = 34,
+ [1][0][RTW89_FCC][23] = 62,
+ [1][0][RTW89_ETSI][23] = 34,
+ [1][0][RTW89_MKK][23] = 58,
+ [1][0][RTW89_IC][23] = 62,
+ [1][0][RTW89_KCC][23] = 54,
+ [1][0][RTW89_ACMA][23] = 34,
+ [1][0][RTW89_CHILE][23] = 64,
+ [1][0][RTW89_UKRAINE][23] = 34,
+ [1][0][RTW89_MEXICO][23] = 62,
+ [1][0][RTW89_CN][23] = 127,
+ [1][0][RTW89_QATAR][23] = 34,
+ [1][0][RTW89_UK][23] = 34,
+ [1][0][RTW89_FCC][25] = 62,
+ [1][0][RTW89_ETSI][25] = 34,
+ [1][0][RTW89_MKK][25] = 58,
+ [1][0][RTW89_IC][25] = 127,
+ [1][0][RTW89_KCC][25] = 54,
+ [1][0][RTW89_ACMA][25] = 127,
+ [1][0][RTW89_CHILE][25] = 64,
+ [1][0][RTW89_UKRAINE][25] = 34,
+ [1][0][RTW89_MEXICO][25] = 62,
+ [1][0][RTW89_CN][25] = 127,
+ [1][0][RTW89_QATAR][25] = 34,
+ [1][0][RTW89_UK][25] = 34,
+ [1][0][RTW89_FCC][27] = 62,
+ [1][0][RTW89_ETSI][27] = 34,
+ [1][0][RTW89_MKK][27] = 58,
+ [1][0][RTW89_IC][27] = 127,
+ [1][0][RTW89_KCC][27] = 54,
+ [1][0][RTW89_ACMA][27] = 127,
+ [1][0][RTW89_CHILE][27] = 64,
+ [1][0][RTW89_UKRAINE][27] = 34,
+ [1][0][RTW89_MEXICO][27] = 62,
+ [1][0][RTW89_CN][27] = 127,
+ [1][0][RTW89_QATAR][27] = 34,
+ [1][0][RTW89_UK][27] = 34,
+ [1][0][RTW89_FCC][29] = 62,
+ [1][0][RTW89_ETSI][29] = 34,
+ [1][0][RTW89_MKK][29] = 58,
+ [1][0][RTW89_IC][29] = 127,
+ [1][0][RTW89_KCC][29] = 54,
+ [1][0][RTW89_ACMA][29] = 127,
+ [1][0][RTW89_CHILE][29] = 66,
+ [1][0][RTW89_UKRAINE][29] = 34,
+ [1][0][RTW89_MEXICO][29] = 62,
+ [1][0][RTW89_CN][29] = 127,
+ [1][0][RTW89_QATAR][29] = 34,
+ [1][0][RTW89_UK][29] = 34,
+ [1][0][RTW89_FCC][31] = 62,
+ [1][0][RTW89_ETSI][31] = 34,
+ [1][0][RTW89_MKK][31] = 58,
+ [1][0][RTW89_IC][31] = 62,
+ [1][0][RTW89_KCC][31] = 54,
+ [1][0][RTW89_ACMA][31] = 34,
+ [1][0][RTW89_CHILE][31] = 66,
+ [1][0][RTW89_UKRAINE][31] = 34,
+ [1][0][RTW89_MEXICO][31] = 62,
+ [1][0][RTW89_CN][31] = 127,
+ [1][0][RTW89_QATAR][31] = 34,
+ [1][0][RTW89_UK][31] = 34,
+ [1][0][RTW89_FCC][33] = 62,
+ [1][0][RTW89_ETSI][33] = 34,
+ [1][0][RTW89_MKK][33] = 58,
+ [1][0][RTW89_IC][33] = 62,
+ [1][0][RTW89_KCC][33] = 54,
+ [1][0][RTW89_ACMA][33] = 34,
+ [1][0][RTW89_CHILE][33] = 66,
+ [1][0][RTW89_UKRAINE][33] = 34,
+ [1][0][RTW89_MEXICO][33] = 62,
+ [1][0][RTW89_CN][33] = 127,
+ [1][0][RTW89_QATAR][33] = 34,
+ [1][0][RTW89_UK][33] = 34,
+ [1][0][RTW89_FCC][35] = 62,
+ [1][0][RTW89_ETSI][35] = 34,
+ [1][0][RTW89_MKK][35] = 58,
+ [1][0][RTW89_IC][35] = 62,
+ [1][0][RTW89_KCC][35] = 54,
+ [1][0][RTW89_ACMA][35] = 34,
+ [1][0][RTW89_CHILE][35] = 66,
+ [1][0][RTW89_UKRAINE][35] = 34,
+ [1][0][RTW89_MEXICO][35] = 62,
+ [1][0][RTW89_CN][35] = 127,
+ [1][0][RTW89_QATAR][35] = 34,
+ [1][0][RTW89_UK][35] = 34,
+ [1][0][RTW89_FCC][37] = 64,
+ [1][0][RTW89_ETSI][37] = 127,
+ [1][0][RTW89_MKK][37] = 52,
+ [1][0][RTW89_IC][37] = 64,
+ [1][0][RTW89_KCC][37] = 54,
+ [1][0][RTW89_ACMA][37] = 64,
+ [1][0][RTW89_CHILE][37] = 64,
+ [1][0][RTW89_UKRAINE][37] = 127,
+ [1][0][RTW89_MEXICO][37] = 64,
+ [1][0][RTW89_CN][37] = 127,
+ [1][0][RTW89_QATAR][37] = 127,
+ [1][0][RTW89_UK][37] = 66,
+ [1][0][RTW89_FCC][38] = 84,
+ [1][0][RTW89_ETSI][38] = 28,
+ [1][0][RTW89_MKK][38] = 127,
+ [1][0][RTW89_IC][38] = 84,
+ [1][0][RTW89_KCC][38] = 56,
+ [1][0][RTW89_ACMA][38] = 84,
+ [1][0][RTW89_CHILE][38] = 64,
+ [1][0][RTW89_UKRAINE][38] = 28,
+ [1][0][RTW89_MEXICO][38] = 84,
+ [1][0][RTW89_CN][38] = 74,
+ [1][0][RTW89_QATAR][38] = 28,
+ [1][0][RTW89_UK][38] = 38,
+ [1][0][RTW89_FCC][40] = 84,
+ [1][0][RTW89_ETSI][40] = 28,
+ [1][0][RTW89_MKK][40] = 127,
+ [1][0][RTW89_IC][40] = 84,
+ [1][0][RTW89_KCC][40] = 56,
+ [1][0][RTW89_ACMA][40] = 84,
+ [1][0][RTW89_CHILE][40] = 64,
+ [1][0][RTW89_UKRAINE][40] = 28,
+ [1][0][RTW89_MEXICO][40] = 84,
+ [1][0][RTW89_CN][40] = 74,
+ [1][0][RTW89_QATAR][40] = 28,
+ [1][0][RTW89_UK][40] = 38,
+ [1][0][RTW89_FCC][42] = 84,
+ [1][0][RTW89_ETSI][42] = 28,
+ [1][0][RTW89_MKK][42] = 127,
+ [1][0][RTW89_IC][42] = 84,
+ [1][0][RTW89_KCC][42] = 56,
+ [1][0][RTW89_ACMA][42] = 84,
+ [1][0][RTW89_CHILE][42] = 64,
+ [1][0][RTW89_UKRAINE][42] = 28,
+ [1][0][RTW89_MEXICO][42] = 84,
+ [1][0][RTW89_CN][42] = 74,
+ [1][0][RTW89_QATAR][42] = 28,
+ [1][0][RTW89_UK][42] = 38,
+ [1][0][RTW89_FCC][44] = 84,
+ [1][0][RTW89_ETSI][44] = 28,
+ [1][0][RTW89_MKK][44] = 127,
+ [1][0][RTW89_IC][44] = 84,
+ [1][0][RTW89_KCC][44] = 56,
+ [1][0][RTW89_ACMA][44] = 84,
+ [1][0][RTW89_CHILE][44] = 64,
+ [1][0][RTW89_UKRAINE][44] = 28,
+ [1][0][RTW89_MEXICO][44] = 84,
+ [1][0][RTW89_CN][44] = 74,
+ [1][0][RTW89_QATAR][44] = 28,
+ [1][0][RTW89_UK][44] = 38,
+ [1][0][RTW89_FCC][46] = 84,
+ [1][0][RTW89_ETSI][46] = 28,
+ [1][0][RTW89_MKK][46] = 127,
+ [1][0][RTW89_IC][46] = 84,
+ [1][0][RTW89_KCC][46] = 56,
+ [1][0][RTW89_ACMA][46] = 84,
+ [1][0][RTW89_CHILE][46] = 64,
+ [1][0][RTW89_UKRAINE][46] = 28,
+ [1][0][RTW89_MEXICO][46] = 84,
+ [1][0][RTW89_CN][46] = 74,
+ [1][0][RTW89_QATAR][46] = 28,
+ [1][0][RTW89_UK][46] = 38,
+ [1][0][RTW89_FCC][48] = 44,
+ [1][0][RTW89_ETSI][48] = 127,
+ [1][0][RTW89_MKK][48] = 127,
+ [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_KCC][48] = 127,
+ [1][0][RTW89_ACMA][48] = 127,
+ [1][0][RTW89_CHILE][48] = 127,
+ [1][0][RTW89_UKRAINE][48] = 127,
+ [1][0][RTW89_MEXICO][48] = 127,
+ [1][0][RTW89_CN][48] = 127,
+ [1][0][RTW89_QATAR][48] = 127,
+ [1][0][RTW89_UK][48] = 127,
+ [1][0][RTW89_FCC][50] = 44,
+ [1][0][RTW89_ETSI][50] = 127,
+ [1][0][RTW89_MKK][50] = 127,
+ [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_KCC][50] = 127,
+ [1][0][RTW89_ACMA][50] = 127,
+ [1][0][RTW89_CHILE][50] = 127,
+ [1][0][RTW89_UKRAINE][50] = 127,
+ [1][0][RTW89_MEXICO][50] = 127,
+ [1][0][RTW89_CN][50] = 127,
+ [1][0][RTW89_QATAR][50] = 127,
+ [1][0][RTW89_UK][50] = 127,
+ [1][0][RTW89_FCC][52] = 44,
+ [1][0][RTW89_ETSI][52] = 127,
+ [1][0][RTW89_MKK][52] = 127,
+ [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_KCC][52] = 127,
+ [1][0][RTW89_ACMA][52] = 127,
+ [1][0][RTW89_CHILE][52] = 127,
+ [1][0][RTW89_UKRAINE][52] = 127,
+ [1][0][RTW89_MEXICO][52] = 127,
+ [1][0][RTW89_CN][52] = 127,
+ [1][0][RTW89_QATAR][52] = 127,
+ [1][0][RTW89_UK][52] = 127,
+ [1][1][RTW89_FCC][0] = 42,
+ [1][1][RTW89_ETSI][0] = 22,
+ [1][1][RTW89_MKK][0] = 22,
+ [1][1][RTW89_IC][0] = 10,
+ [1][1][RTW89_KCC][0] = 36,
+ [1][1][RTW89_ACMA][0] = 22,
+ [1][1][RTW89_CHILE][0] = 22,
+ [1][1][RTW89_UKRAINE][0] = 22,
+ [1][1][RTW89_MEXICO][0] = 42,
+ [1][1][RTW89_CN][0] = 22,
+ [1][1][RTW89_QATAR][0] = 22,
+ [1][1][RTW89_UK][0] = 22,
+ [1][1][RTW89_FCC][2] = 44,
+ [1][1][RTW89_ETSI][2] = 22,
+ [1][1][RTW89_MKK][2] = 22,
+ [1][1][RTW89_IC][2] = 14,
+ [1][1][RTW89_KCC][2] = 36,
+ [1][1][RTW89_ACMA][2] = 22,
+ [1][1][RTW89_CHILE][2] = 22,
+ [1][1][RTW89_UKRAINE][2] = 22,
+ [1][1][RTW89_MEXICO][2] = 44,
+ [1][1][RTW89_CN][2] = 22,
+ [1][1][RTW89_QATAR][2] = 22,
+ [1][1][RTW89_UK][2] = 22,
+ [1][1][RTW89_FCC][4] = 42,
+ [1][1][RTW89_ETSI][4] = 22,
+ [1][1][RTW89_MKK][4] = 20,
+ [1][1][RTW89_IC][4] = 10,
+ [1][1][RTW89_KCC][4] = 36,
+ [1][1][RTW89_ACMA][4] = 22,
+ [1][1][RTW89_CHILE][4] = 20,
+ [1][1][RTW89_UKRAINE][4] = 22,
+ [1][1][RTW89_MEXICO][4] = 42,
+ [1][1][RTW89_CN][4] = 22,
+ [1][1][RTW89_QATAR][4] = 22,
+ [1][1][RTW89_UK][4] = 22,
+ [1][1][RTW89_FCC][6] = 42,
+ [1][1][RTW89_ETSI][6] = 22,
+ [1][1][RTW89_MKK][6] = 20,
+ [1][1][RTW89_IC][6] = 10,
+ [1][1][RTW89_KCC][6] = 10,
+ [1][1][RTW89_ACMA][6] = 22,
+ [1][1][RTW89_CHILE][6] = 20,
+ [1][1][RTW89_UKRAINE][6] = 22,
+ [1][1][RTW89_MEXICO][6] = 42,
+ [1][1][RTW89_CN][6] = 22,
+ [1][1][RTW89_QATAR][6] = 22,
+ [1][1][RTW89_UK][6] = 22,
+ [1][1][RTW89_FCC][8] = 44,
+ [1][1][RTW89_ETSI][8] = 22,
+ [1][1][RTW89_MKK][8] = 20,
+ [1][1][RTW89_IC][8] = 44,
+ [1][1][RTW89_KCC][8] = 36,
+ [1][1][RTW89_ACMA][8] = 22,
+ [1][1][RTW89_CHILE][8] = 54,
+ [1][1][RTW89_UKRAINE][8] = 22,
+ [1][1][RTW89_MEXICO][8] = 44,
+ [1][1][RTW89_CN][8] = 22,
+ [1][1][RTW89_QATAR][8] = 22,
+ [1][1][RTW89_UK][8] = 22,
+ [1][1][RTW89_FCC][10] = 44,
+ [1][1][RTW89_ETSI][10] = 22,
+ [1][1][RTW89_MKK][10] = 20,
+ [1][1][RTW89_IC][10] = 44,
+ [1][1][RTW89_KCC][10] = 36,
+ [1][1][RTW89_ACMA][10] = 22,
+ [1][1][RTW89_CHILE][10] = 54,
+ [1][1][RTW89_UKRAINE][10] = 22,
+ [1][1][RTW89_MEXICO][10] = 44,
+ [1][1][RTW89_CN][10] = 22,
+ [1][1][RTW89_QATAR][10] = 22,
+ [1][1][RTW89_UK][10] = 22,
+ [1][1][RTW89_FCC][12] = 46,
+ [1][1][RTW89_ETSI][12] = 22,
+ [1][1][RTW89_MKK][12] = 22,
+ [1][1][RTW89_IC][12] = 46,
+ [1][1][RTW89_KCC][12] = 40,
+ [1][1][RTW89_ACMA][12] = 22,
+ [1][1][RTW89_CHILE][12] = 52,
+ [1][1][RTW89_UKRAINE][12] = 22,
+ [1][1][RTW89_MEXICO][12] = 46,
+ [1][1][RTW89_CN][12] = 22,
+ [1][1][RTW89_QATAR][12] = 22,
+ [1][1][RTW89_UK][12] = 22,
+ [1][1][RTW89_FCC][14] = 42,
+ [1][1][RTW89_ETSI][14] = 22,
+ [1][1][RTW89_MKK][14] = 22,
+ [1][1][RTW89_IC][14] = 40,
+ [1][1][RTW89_KCC][14] = 40,
+ [1][1][RTW89_ACMA][14] = 22,
+ [1][1][RTW89_CHILE][14] = 54,
+ [1][1][RTW89_UKRAINE][14] = 22,
+ [1][1][RTW89_MEXICO][14] = 42,
+ [1][1][RTW89_CN][14] = 22,
+ [1][1][RTW89_QATAR][14] = 22,
+ [1][1][RTW89_UK][14] = 22,
+ [1][1][RTW89_FCC][15] = 42,
+ [1][1][RTW89_ETSI][15] = 22,
+ [1][1][RTW89_MKK][15] = 42,
+ [1][1][RTW89_IC][15] = 42,
+ [1][1][RTW89_KCC][15] = 38,
+ [1][1][RTW89_ACMA][15] = 22,
+ [1][1][RTW89_CHILE][15] = 54,
+ [1][1][RTW89_UKRAINE][15] = 22,
+ [1][1][RTW89_MEXICO][15] = 42,
+ [1][1][RTW89_CN][15] = 127,
+ [1][1][RTW89_QATAR][15] = 22,
+ [1][1][RTW89_UK][15] = 22,
+ [1][1][RTW89_FCC][17] = 42,
+ [1][1][RTW89_ETSI][17] = 22,
+ [1][1][RTW89_MKK][17] = 44,
+ [1][1][RTW89_IC][17] = 42,
+ [1][1][RTW89_KCC][17] = 38,
+ [1][1][RTW89_ACMA][17] = 22,
+ [1][1][RTW89_CHILE][17] = 54,
+ [1][1][RTW89_UKRAINE][17] = 22,
+ [1][1][RTW89_MEXICO][17] = 42,
+ [1][1][RTW89_CN][17] = 127,
+ [1][1][RTW89_QATAR][17] = 22,
+ [1][1][RTW89_UK][17] = 22,
+ [1][1][RTW89_FCC][19] = 42,
+ [1][1][RTW89_ETSI][19] = 22,
+ [1][1][RTW89_MKK][19] = 44,
+ [1][1][RTW89_IC][19] = 42,
+ [1][1][RTW89_KCC][19] = 38,
+ [1][1][RTW89_ACMA][19] = 22,
+ [1][1][RTW89_CHILE][19] = 54,
+ [1][1][RTW89_UKRAINE][19] = 22,
+ [1][1][RTW89_MEXICO][19] = 42,
+ [1][1][RTW89_CN][19] = 127,
+ [1][1][RTW89_QATAR][19] = 22,
+ [1][1][RTW89_UK][19] = 22,
+ [1][1][RTW89_FCC][21] = 42,
+ [1][1][RTW89_ETSI][21] = 22,
+ [1][1][RTW89_MKK][21] = 44,
+ [1][1][RTW89_IC][21] = 42,
+ [1][1][RTW89_KCC][21] = 38,
+ [1][1][RTW89_ACMA][21] = 22,
+ [1][1][RTW89_CHILE][21] = 54,
+ [1][1][RTW89_UKRAINE][21] = 22,
+ [1][1][RTW89_MEXICO][21] = 42,
+ [1][1][RTW89_CN][21] = 127,
+ [1][1][RTW89_QATAR][21] = 22,
+ [1][1][RTW89_UK][21] = 22,
+ [1][1][RTW89_FCC][23] = 42,
+ [1][1][RTW89_ETSI][23] = 22,
+ [1][1][RTW89_MKK][23] = 44,
+ [1][1][RTW89_IC][23] = 42,
+ [1][1][RTW89_KCC][23] = 38,
+ [1][1][RTW89_ACMA][23] = 22,
+ [1][1][RTW89_CHILE][23] = 54,
+ [1][1][RTW89_UKRAINE][23] = 22,
+ [1][1][RTW89_MEXICO][23] = 42,
+ [1][1][RTW89_CN][23] = 127,
+ [1][1][RTW89_QATAR][23] = 22,
+ [1][1][RTW89_UK][23] = 22,
+ [1][1][RTW89_FCC][25] = 42,
+ [1][1][RTW89_ETSI][25] = 22,
+ [1][1][RTW89_MKK][25] = 44,
+ [1][1][RTW89_IC][25] = 127,
+ [1][1][RTW89_KCC][25] = 38,
+ [1][1][RTW89_ACMA][25] = 127,
+ [1][1][RTW89_CHILE][25] = 54,
+ [1][1][RTW89_UKRAINE][25] = 22,
+ [1][1][RTW89_MEXICO][25] = 42,
+ [1][1][RTW89_CN][25] = 127,
+ [1][1][RTW89_QATAR][25] = 22,
+ [1][1][RTW89_UK][25] = 22,
+ [1][1][RTW89_FCC][27] = 42,
+ [1][1][RTW89_ETSI][27] = 22,
+ [1][1][RTW89_MKK][27] = 44,
+ [1][1][RTW89_IC][27] = 127,
+ [1][1][RTW89_KCC][27] = 38,
+ [1][1][RTW89_ACMA][27] = 127,
+ [1][1][RTW89_CHILE][27] = 54,
+ [1][1][RTW89_UKRAINE][27] = 22,
+ [1][1][RTW89_MEXICO][27] = 42,
+ [1][1][RTW89_CN][27] = 127,
+ [1][1][RTW89_QATAR][27] = 22,
+ [1][1][RTW89_UK][27] = 22,
+ [1][1][RTW89_FCC][29] = 42,
+ [1][1][RTW89_ETSI][29] = 22,
+ [1][1][RTW89_MKK][29] = 44,
+ [1][1][RTW89_IC][29] = 127,
+ [1][1][RTW89_KCC][29] = 38,
+ [1][1][RTW89_ACMA][29] = 127,
+ [1][1][RTW89_CHILE][29] = 54,
+ [1][1][RTW89_UKRAINE][29] = 22,
+ [1][1][RTW89_MEXICO][29] = 42,
+ [1][1][RTW89_CN][29] = 127,
+ [1][1][RTW89_QATAR][29] = 22,
+ [1][1][RTW89_UK][29] = 22,
+ [1][1][RTW89_FCC][31] = 42,
+ [1][1][RTW89_ETSI][31] = 22,
+ [1][1][RTW89_MKK][31] = 44,
+ [1][1][RTW89_IC][31] = 38,
+ [1][1][RTW89_KCC][31] = 38,
+ [1][1][RTW89_ACMA][31] = 22,
+ [1][1][RTW89_CHILE][31] = 54,
+ [1][1][RTW89_UKRAINE][31] = 22,
+ [1][1][RTW89_MEXICO][31] = 42,
+ [1][1][RTW89_CN][31] = 127,
+ [1][1][RTW89_QATAR][31] = 22,
+ [1][1][RTW89_UK][31] = 22,
+ [1][1][RTW89_FCC][33] = 40,
+ [1][1][RTW89_ETSI][33] = 22,
+ [1][1][RTW89_MKK][33] = 44,
+ [1][1][RTW89_IC][33] = 38,
+ [1][1][RTW89_KCC][33] = 38,
+ [1][1][RTW89_ACMA][33] = 22,
+ [1][1][RTW89_CHILE][33] = 54,
+ [1][1][RTW89_UKRAINE][33] = 22,
+ [1][1][RTW89_MEXICO][33] = 40,
+ [1][1][RTW89_CN][33] = 127,
+ [1][1][RTW89_QATAR][33] = 22,
+ [1][1][RTW89_UK][33] = 22,
+ [1][1][RTW89_FCC][35] = 40,
+ [1][1][RTW89_ETSI][35] = 22,
+ [1][1][RTW89_MKK][35] = 44,
+ [1][1][RTW89_IC][35] = 38,
+ [1][1][RTW89_KCC][35] = 38,
+ [1][1][RTW89_ACMA][35] = 22,
+ [1][1][RTW89_CHILE][35] = 54,
+ [1][1][RTW89_UKRAINE][35] = 22,
+ [1][1][RTW89_MEXICO][35] = 40,
+ [1][1][RTW89_CN][35] = 127,
+ [1][1][RTW89_QATAR][35] = 22,
+ [1][1][RTW89_UK][35] = 22,
+ [1][1][RTW89_FCC][37] = 48,
+ [1][1][RTW89_ETSI][37] = 127,
+ [1][1][RTW89_MKK][37] = 42,
+ [1][1][RTW89_IC][37] = 48,
+ [1][1][RTW89_KCC][37] = 38,
+ [1][1][RTW89_ACMA][37] = 48,
+ [1][1][RTW89_CHILE][37] = 54,
+ [1][1][RTW89_UKRAINE][37] = 127,
+ [1][1][RTW89_MEXICO][37] = 48,
+ [1][1][RTW89_CN][37] = 127,
+ [1][1][RTW89_QATAR][37] = 127,
+ [1][1][RTW89_UK][37] = 54,
+ [1][1][RTW89_FCC][38] = 84,
+ [1][1][RTW89_ETSI][38] = 16,
+ [1][1][RTW89_MKK][38] = 127,
+ [1][1][RTW89_IC][38] = 84,
+ [1][1][RTW89_KCC][38] = 38,
+ [1][1][RTW89_ACMA][38] = 82,
+ [1][1][RTW89_CHILE][38] = 54,
+ [1][1][RTW89_UKRAINE][38] = 16,
+ [1][1][RTW89_MEXICO][38] = 84,
+ [1][1][RTW89_CN][38] = 62,
+ [1][1][RTW89_QATAR][38] = 16,
+ [1][1][RTW89_UK][38] = 26,
+ [1][1][RTW89_FCC][40] = 84,
+ [1][1][RTW89_ETSI][40] = 16,
+ [1][1][RTW89_MKK][40] = 127,
+ [1][1][RTW89_IC][40] = 84,
+ [1][1][RTW89_KCC][40] = 38,
+ [1][1][RTW89_ACMA][40] = 82,
+ [1][1][RTW89_CHILE][40] = 54,
+ [1][1][RTW89_UKRAINE][40] = 16,
+ [1][1][RTW89_MEXICO][40] = 84,
+ [1][1][RTW89_CN][40] = 62,
+ [1][1][RTW89_QATAR][40] = 16,
+ [1][1][RTW89_UK][40] = 26,
+ [1][1][RTW89_FCC][42] = 84,
+ [1][1][RTW89_ETSI][42] = 16,
+ [1][1][RTW89_MKK][42] = 127,
+ [1][1][RTW89_IC][42] = 84,
+ [1][1][RTW89_KCC][42] = 38,
+ [1][1][RTW89_ACMA][42] = 84,
+ [1][1][RTW89_CHILE][42] = 54,
+ [1][1][RTW89_UKRAINE][42] = 16,
+ [1][1][RTW89_MEXICO][42] = 84,
+ [1][1][RTW89_CN][42] = 62,
+ [1][1][RTW89_QATAR][42] = 16,
+ [1][1][RTW89_UK][42] = 26,
+ [1][1][RTW89_FCC][44] = 84,
+ [1][1][RTW89_ETSI][44] = 16,
+ [1][1][RTW89_MKK][44] = 127,
+ [1][1][RTW89_IC][44] = 84,
+ [1][1][RTW89_KCC][44] = 38,
+ [1][1][RTW89_ACMA][44] = 84,
+ [1][1][RTW89_CHILE][44] = 56,
+ [1][1][RTW89_UKRAINE][44] = 16,
+ [1][1][RTW89_MEXICO][44] = 84,
+ [1][1][RTW89_CN][44] = 62,
+ [1][1][RTW89_QATAR][44] = 16,
+ [1][1][RTW89_UK][44] = 26,
+ [1][1][RTW89_FCC][46] = 84,
+ [1][1][RTW89_ETSI][46] = 16,
+ [1][1][RTW89_MKK][46] = 127,
+ [1][1][RTW89_IC][46] = 84,
+ [1][1][RTW89_KCC][46] = 38,
+ [1][1][RTW89_ACMA][46] = 84,
+ [1][1][RTW89_CHILE][46] = 56,
+ [1][1][RTW89_UKRAINE][46] = 16,
+ [1][1][RTW89_MEXICO][46] = 84,
+ [1][1][RTW89_CN][46] = 62,
+ [1][1][RTW89_QATAR][46] = 16,
+ [1][1][RTW89_UK][46] = 26,
+ [1][1][RTW89_FCC][48] = 32,
+ [1][1][RTW89_ETSI][48] = 127,
+ [1][1][RTW89_MKK][48] = 127,
+ [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_KCC][48] = 127,
+ [1][1][RTW89_ACMA][48] = 127,
+ [1][1][RTW89_CHILE][48] = 127,
+ [1][1][RTW89_UKRAINE][48] = 127,
+ [1][1][RTW89_MEXICO][48] = 127,
+ [1][1][RTW89_CN][48] = 127,
+ [1][1][RTW89_QATAR][48] = 127,
+ [1][1][RTW89_UK][48] = 127,
+ [1][1][RTW89_FCC][50] = 32,
+ [1][1][RTW89_ETSI][50] = 127,
+ [1][1][RTW89_MKK][50] = 127,
+ [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_KCC][50] = 127,
+ [1][1][RTW89_ACMA][50] = 127,
+ [1][1][RTW89_CHILE][50] = 127,
+ [1][1][RTW89_UKRAINE][50] = 127,
+ [1][1][RTW89_MEXICO][50] = 127,
+ [1][1][RTW89_CN][50] = 127,
+ [1][1][RTW89_QATAR][50] = 127,
+ [1][1][RTW89_UK][50] = 127,
+ [1][1][RTW89_FCC][52] = 32,
+ [1][1][RTW89_ETSI][52] = 127,
+ [1][1][RTW89_MKK][52] = 127,
+ [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_KCC][52] = 127,
+ [1][1][RTW89_ACMA][52] = 127,
+ [1][1][RTW89_CHILE][52] = 127,
+ [1][1][RTW89_UKRAINE][52] = 127,
+ [1][1][RTW89_MEXICO][52] = 127,
+ [1][1][RTW89_CN][52] = 127,
+ [1][1][RTW89_QATAR][52] = 127,
+ [1][1][RTW89_UK][52] = 127,
+ [2][0][RTW89_FCC][0] = 70,
+ [2][0][RTW89_ETSI][0] = 48,
+ [2][0][RTW89_MKK][0] = 48,
+ [2][0][RTW89_IC][0] = 46,
+ [2][0][RTW89_KCC][0] = 66,
+ [2][0][RTW89_ACMA][0] = 48,
+ [2][0][RTW89_CHILE][0] = 44,
+ [2][0][RTW89_UKRAINE][0] = 48,
+ [2][0][RTW89_MEXICO][0] = 64,
+ [2][0][RTW89_CN][0] = 48,
+ [2][0][RTW89_QATAR][0] = 48,
+ [2][0][RTW89_UK][0] = 48,
+ [2][0][RTW89_FCC][2] = 70,
+ [2][0][RTW89_ETSI][2] = 48,
+ [2][0][RTW89_MKK][2] = 48,
+ [2][0][RTW89_IC][2] = 46,
+ [2][0][RTW89_KCC][2] = 66,
+ [2][0][RTW89_ACMA][2] = 48,
+ [2][0][RTW89_CHILE][2] = 44,
+ [2][0][RTW89_UKRAINE][2] = 48,
+ [2][0][RTW89_MEXICO][2] = 64,
+ [2][0][RTW89_CN][2] = 48,
+ [2][0][RTW89_QATAR][2] = 48,
+ [2][0][RTW89_UK][2] = 48,
+ [2][0][RTW89_FCC][4] = 70,
+ [2][0][RTW89_ETSI][4] = 48,
+ [2][0][RTW89_MKK][4] = 48,
+ [2][0][RTW89_IC][4] = 46,
+ [2][0][RTW89_KCC][4] = 66,
+ [2][0][RTW89_ACMA][4] = 48,
+ [2][0][RTW89_CHILE][4] = 44,
+ [2][0][RTW89_UKRAINE][4] = 48,
+ [2][0][RTW89_MEXICO][4] = 64,
+ [2][0][RTW89_CN][4] = 48,
+ [2][0][RTW89_QATAR][4] = 48,
+ [2][0][RTW89_UK][4] = 48,
+ [2][0][RTW89_FCC][6] = 70,
+ [2][0][RTW89_ETSI][6] = 48,
+ [2][0][RTW89_MKK][6] = 48,
+ [2][0][RTW89_IC][6] = 46,
+ [2][0][RTW89_KCC][6] = 38,
+ [2][0][RTW89_ACMA][6] = 48,
+ [2][0][RTW89_CHILE][6] = 44,
+ [2][0][RTW89_UKRAINE][6] = 48,
+ [2][0][RTW89_MEXICO][6] = 64,
+ [2][0][RTW89_CN][6] = 48,
+ [2][0][RTW89_QATAR][6] = 48,
+ [2][0][RTW89_UK][6] = 48,
+ [2][0][RTW89_FCC][8] = 70,
+ [2][0][RTW89_ETSI][8] = 48,
+ [2][0][RTW89_MKK][8] = 48,
+ [2][0][RTW89_IC][8] = 66,
+ [2][0][RTW89_KCC][8] = 64,
+ [2][0][RTW89_ACMA][8] = 48,
+ [2][0][RTW89_CHILE][8] = 66,
+ [2][0][RTW89_UKRAINE][8] = 48,
+ [2][0][RTW89_MEXICO][8] = 70,
+ [2][0][RTW89_CN][8] = 48,
+ [2][0][RTW89_QATAR][8] = 48,
+ [2][0][RTW89_UK][8] = 48,
+ [2][0][RTW89_FCC][10] = 70,
+ [2][0][RTW89_ETSI][10] = 48,
+ [2][0][RTW89_MKK][10] = 48,
+ [2][0][RTW89_IC][10] = 66,
+ [2][0][RTW89_KCC][10] = 64,
+ [2][0][RTW89_ACMA][10] = 48,
+ [2][0][RTW89_CHILE][10] = 66,
+ [2][0][RTW89_UKRAINE][10] = 48,
+ [2][0][RTW89_MEXICO][10] = 70,
+ [2][0][RTW89_CN][10] = 48,
+ [2][0][RTW89_QATAR][10] = 48,
+ [2][0][RTW89_UK][10] = 48,
+ [2][0][RTW89_FCC][12] = 70,
+ [2][0][RTW89_ETSI][12] = 48,
+ [2][0][RTW89_MKK][12] = 46,
+ [2][0][RTW89_IC][12] = 66,
+ [2][0][RTW89_KCC][12] = 64,
+ [2][0][RTW89_ACMA][12] = 48,
+ [2][0][RTW89_CHILE][12] = 66,
+ [2][0][RTW89_UKRAINE][12] = 48,
+ [2][0][RTW89_MEXICO][12] = 70,
+ [2][0][RTW89_CN][12] = 48,
+ [2][0][RTW89_QATAR][12] = 48,
+ [2][0][RTW89_UK][12] = 48,
+ [2][0][RTW89_FCC][14] = 70,
+ [2][0][RTW89_ETSI][14] = 48,
+ [2][0][RTW89_MKK][14] = 46,
+ [2][0][RTW89_IC][14] = 66,
+ [2][0][RTW89_KCC][14] = 64,
+ [2][0][RTW89_ACMA][14] = 48,
+ [2][0][RTW89_CHILE][14] = 66,
+ [2][0][RTW89_UKRAINE][14] = 48,
+ [2][0][RTW89_MEXICO][14] = 70,
+ [2][0][RTW89_CN][14] = 48,
+ [2][0][RTW89_QATAR][14] = 48,
+ [2][0][RTW89_UK][14] = 48,
+ [2][0][RTW89_FCC][15] = 70,
+ [2][0][RTW89_ETSI][15] = 48,
+ [2][0][RTW89_MKK][15] = 68,
+ [2][0][RTW89_IC][15] = 70,
+ [2][0][RTW89_KCC][15] = 64,
+ [2][0][RTW89_ACMA][15] = 48,
+ [2][0][RTW89_CHILE][15] = 62,
+ [2][0][RTW89_UKRAINE][15] = 48,
+ [2][0][RTW89_MEXICO][15] = 70,
+ [2][0][RTW89_CN][15] = 127,
+ [2][0][RTW89_QATAR][15] = 48,
+ [2][0][RTW89_UK][15] = 48,
+ [2][0][RTW89_FCC][17] = 70,
+ [2][0][RTW89_ETSI][17] = 48,
+ [2][0][RTW89_MKK][17] = 70,
+ [2][0][RTW89_IC][17] = 70,
+ [2][0][RTW89_KCC][17] = 64,
+ [2][0][RTW89_ACMA][17] = 48,
+ [2][0][RTW89_CHILE][17] = 62,
+ [2][0][RTW89_UKRAINE][17] = 48,
+ [2][0][RTW89_MEXICO][17] = 70,
+ [2][0][RTW89_CN][17] = 127,
+ [2][0][RTW89_QATAR][17] = 48,
+ [2][0][RTW89_UK][17] = 48,
+ [2][0][RTW89_FCC][19] = 70,
+ [2][0][RTW89_ETSI][19] = 48,
+ [2][0][RTW89_MKK][19] = 70,
+ [2][0][RTW89_IC][19] = 70,
+ [2][0][RTW89_KCC][19] = 64,
+ [2][0][RTW89_ACMA][19] = 48,
+ [2][0][RTW89_CHILE][19] = 62,
+ [2][0][RTW89_UKRAINE][19] = 48,
+ [2][0][RTW89_MEXICO][19] = 70,
+ [2][0][RTW89_CN][19] = 127,
+ [2][0][RTW89_QATAR][19] = 48,
+ [2][0][RTW89_UK][19] = 48,
+ [2][0][RTW89_FCC][21] = 70,
+ [2][0][RTW89_ETSI][21] = 48,
+ [2][0][RTW89_MKK][21] = 70,
+ [2][0][RTW89_IC][21] = 70,
+ [2][0][RTW89_KCC][21] = 64,
+ [2][0][RTW89_ACMA][21] = 48,
+ [2][0][RTW89_CHILE][21] = 64,
+ [2][0][RTW89_UKRAINE][21] = 48,
+ [2][0][RTW89_MEXICO][21] = 70,
+ [2][0][RTW89_CN][21] = 127,
+ [2][0][RTW89_QATAR][21] = 48,
+ [2][0][RTW89_UK][21] = 48,
+ [2][0][RTW89_FCC][23] = 70,
+ [2][0][RTW89_ETSI][23] = 48,
+ [2][0][RTW89_MKK][23] = 70,
+ [2][0][RTW89_IC][23] = 70,
+ [2][0][RTW89_KCC][23] = 64,
+ [2][0][RTW89_ACMA][23] = 48,
+ [2][0][RTW89_CHILE][23] = 64,
+ [2][0][RTW89_UKRAINE][23] = 48,
+ [2][0][RTW89_MEXICO][23] = 70,
+ [2][0][RTW89_CN][23] = 127,
+ [2][0][RTW89_QATAR][23] = 48,
+ [2][0][RTW89_UK][23] = 48,
+ [2][0][RTW89_FCC][25] = 70,
+ [2][0][RTW89_ETSI][25] = 48,
+ [2][0][RTW89_MKK][25] = 70,
+ [2][0][RTW89_IC][25] = 127,
+ [2][0][RTW89_KCC][25] = 64,
+ [2][0][RTW89_ACMA][25] = 127,
+ [2][0][RTW89_CHILE][25] = 64,
+ [2][0][RTW89_UKRAINE][25] = 48,
+ [2][0][RTW89_MEXICO][25] = 70,
+ [2][0][RTW89_CN][25] = 127,
+ [2][0][RTW89_QATAR][25] = 48,
+ [2][0][RTW89_UK][25] = 48,
+ [2][0][RTW89_FCC][27] = 70,
+ [2][0][RTW89_ETSI][27] = 48,
+ [2][0][RTW89_MKK][27] = 70,
+ [2][0][RTW89_IC][27] = 127,
+ [2][0][RTW89_KCC][27] = 64,
+ [2][0][RTW89_ACMA][27] = 127,
+ [2][0][RTW89_CHILE][27] = 64,
+ [2][0][RTW89_UKRAINE][27] = 48,
+ [2][0][RTW89_MEXICO][27] = 70,
+ [2][0][RTW89_CN][27] = 127,
+ [2][0][RTW89_QATAR][27] = 48,
+ [2][0][RTW89_UK][27] = 48,
+ [2][0][RTW89_FCC][29] = 70,
+ [2][0][RTW89_ETSI][29] = 48,
+ [2][0][RTW89_MKK][29] = 70,
+ [2][0][RTW89_IC][29] = 127,
+ [2][0][RTW89_KCC][29] = 64,
+ [2][0][RTW89_ACMA][29] = 127,
+ [2][0][RTW89_CHILE][29] = 66,
+ [2][0][RTW89_UKRAINE][29] = 48,
+ [2][0][RTW89_MEXICO][29] = 70,
+ [2][0][RTW89_CN][29] = 127,
+ [2][0][RTW89_QATAR][29] = 48,
+ [2][0][RTW89_UK][29] = 48,
+ [2][0][RTW89_FCC][31] = 70,
+ [2][0][RTW89_ETSI][31] = 48,
+ [2][0][RTW89_MKK][31] = 70,
+ [2][0][RTW89_IC][31] = 72,
+ [2][0][RTW89_KCC][31] = 64,
+ [2][0][RTW89_ACMA][31] = 48,
+ [2][0][RTW89_CHILE][31] = 66,
+ [2][0][RTW89_UKRAINE][31] = 48,
+ [2][0][RTW89_MEXICO][31] = 70,
+ [2][0][RTW89_CN][31] = 127,
+ [2][0][RTW89_QATAR][31] = 48,
+ [2][0][RTW89_UK][31] = 48,
+ [2][0][RTW89_FCC][33] = 72,
+ [2][0][RTW89_ETSI][33] = 48,
+ [2][0][RTW89_MKK][33] = 70,
+ [2][0][RTW89_IC][33] = 72,
+ [2][0][RTW89_KCC][33] = 64,
+ [2][0][RTW89_ACMA][33] = 48,
+ [2][0][RTW89_CHILE][33] = 66,
+ [2][0][RTW89_UKRAINE][33] = 48,
+ [2][0][RTW89_MEXICO][33] = 72,
+ [2][0][RTW89_CN][33] = 127,
+ [2][0][RTW89_QATAR][33] = 48,
+ [2][0][RTW89_UK][33] = 48,
+ [2][0][RTW89_FCC][35] = 72,
+ [2][0][RTW89_ETSI][35] = 48,
+ [2][0][RTW89_MKK][35] = 70,
+ [2][0][RTW89_IC][35] = 72,
+ [2][0][RTW89_KCC][35] = 64,
+ [2][0][RTW89_ACMA][35] = 48,
+ [2][0][RTW89_CHILE][35] = 66,
+ [2][0][RTW89_UKRAINE][35] = 48,
+ [2][0][RTW89_MEXICO][35] = 72,
+ [2][0][RTW89_CN][35] = 127,
+ [2][0][RTW89_QATAR][35] = 48,
+ [2][0][RTW89_UK][35] = 48,
+ [2][0][RTW89_FCC][37] = 70,
+ [2][0][RTW89_ETSI][37] = 127,
+ [2][0][RTW89_MKK][37] = 66,
+ [2][0][RTW89_IC][37] = 70,
+ [2][0][RTW89_KCC][37] = 64,
+ [2][0][RTW89_ACMA][37] = 76,
+ [2][0][RTW89_CHILE][37] = 66,
+ [2][0][RTW89_UKRAINE][37] = 127,
+ [2][0][RTW89_MEXICO][37] = 70,
+ [2][0][RTW89_CN][37] = 127,
+ [2][0][RTW89_QATAR][37] = 127,
+ [2][0][RTW89_UK][37] = 76,
+ [2][0][RTW89_FCC][38] = 84,
+ [2][0][RTW89_ETSI][38] = 28,
+ [2][0][RTW89_MKK][38] = 127,
+ [2][0][RTW89_IC][38] = 84,
+ [2][0][RTW89_KCC][38] = 66,
+ [2][0][RTW89_ACMA][38] = 84,
+ [2][0][RTW89_CHILE][38] = 64,
+ [2][0][RTW89_UKRAINE][38] = 28,
+ [2][0][RTW89_MEXICO][38] = 84,
+ [2][0][RTW89_CN][38] = 76,
+ [2][0][RTW89_QATAR][38] = 28,
+ [2][0][RTW89_UK][38] = 50,
+ [2][0][RTW89_FCC][40] = 84,
+ [2][0][RTW89_ETSI][40] = 28,
+ [2][0][RTW89_MKK][40] = 127,
+ [2][0][RTW89_IC][40] = 84,
+ [2][0][RTW89_KCC][40] = 66,
+ [2][0][RTW89_ACMA][40] = 84,
+ [2][0][RTW89_CHILE][40] = 64,
+ [2][0][RTW89_UKRAINE][40] = 28,
+ [2][0][RTW89_MEXICO][40] = 84,
+ [2][0][RTW89_CN][40] = 76,
+ [2][0][RTW89_QATAR][40] = 28,
+ [2][0][RTW89_UK][40] = 50,
+ [2][0][RTW89_FCC][42] = 84,
+ [2][0][RTW89_ETSI][42] = 28,
+ [2][0][RTW89_MKK][42] = 127,
+ [2][0][RTW89_IC][42] = 84,
+ [2][0][RTW89_KCC][42] = 66,
+ [2][0][RTW89_ACMA][42] = 84,
+ [2][0][RTW89_CHILE][42] = 66,
+ [2][0][RTW89_UKRAINE][42] = 28,
+ [2][0][RTW89_MEXICO][42] = 84,
+ [2][0][RTW89_CN][42] = 76,
+ [2][0][RTW89_QATAR][42] = 28,
+ [2][0][RTW89_UK][42] = 50,
+ [2][0][RTW89_FCC][44] = 84,
+ [2][0][RTW89_ETSI][44] = 28,
+ [2][0][RTW89_MKK][44] = 127,
+ [2][0][RTW89_IC][44] = 84,
+ [2][0][RTW89_KCC][44] = 66,
+ [2][0][RTW89_ACMA][44] = 84,
+ [2][0][RTW89_CHILE][44] = 64,
+ [2][0][RTW89_UKRAINE][44] = 28,
+ [2][0][RTW89_MEXICO][44] = 84,
+ [2][0][RTW89_CN][44] = 76,
+ [2][0][RTW89_QATAR][44] = 28,
+ [2][0][RTW89_UK][44] = 50,
+ [2][0][RTW89_FCC][46] = 84,
+ [2][0][RTW89_ETSI][46] = 28,
+ [2][0][RTW89_MKK][46] = 127,
+ [2][0][RTW89_IC][46] = 84,
+ [2][0][RTW89_KCC][46] = 66,
+ [2][0][RTW89_ACMA][46] = 84,
+ [2][0][RTW89_CHILE][46] = 64,
+ [2][0][RTW89_UKRAINE][46] = 28,
+ [2][0][RTW89_MEXICO][46] = 84,
+ [2][0][RTW89_CN][46] = 76,
+ [2][0][RTW89_QATAR][46] = 28,
+ [2][0][RTW89_UK][46] = 50,
+ [2][0][RTW89_FCC][48] = 56,
+ [2][0][RTW89_ETSI][48] = 127,
+ [2][0][RTW89_MKK][48] = 127,
+ [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_KCC][48] = 127,
+ [2][0][RTW89_ACMA][48] = 127,
+ [2][0][RTW89_CHILE][48] = 127,
+ [2][0][RTW89_UKRAINE][48] = 127,
+ [2][0][RTW89_MEXICO][48] = 127,
+ [2][0][RTW89_CN][48] = 127,
+ [2][0][RTW89_QATAR][48] = 127,
+ [2][0][RTW89_UK][48] = 127,
+ [2][0][RTW89_FCC][50] = 56,
+ [2][0][RTW89_ETSI][50] = 127,
+ [2][0][RTW89_MKK][50] = 127,
+ [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_KCC][50] = 127,
+ [2][0][RTW89_ACMA][50] = 127,
+ [2][0][RTW89_CHILE][50] = 127,
+ [2][0][RTW89_UKRAINE][50] = 127,
+ [2][0][RTW89_MEXICO][50] = 127,
+ [2][0][RTW89_CN][50] = 127,
+ [2][0][RTW89_QATAR][50] = 127,
+ [2][0][RTW89_UK][50] = 127,
+ [2][0][RTW89_FCC][52] = 56,
+ [2][0][RTW89_ETSI][52] = 127,
+ [2][0][RTW89_MKK][52] = 127,
+ [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_KCC][52] = 127,
+ [2][0][RTW89_ACMA][52] = 127,
+ [2][0][RTW89_CHILE][52] = 127,
+ [2][0][RTW89_UKRAINE][52] = 127,
+ [2][0][RTW89_MEXICO][52] = 127,
+ [2][0][RTW89_CN][52] = 127,
+ [2][0][RTW89_QATAR][52] = 127,
+ [2][0][RTW89_UK][52] = 127,
+ [2][1][RTW89_FCC][0] = 50,
+ [2][1][RTW89_ETSI][0] = 36,
+ [2][1][RTW89_MKK][0] = 36,
+ [2][1][RTW89_IC][0] = 20,
+ [2][1][RTW89_KCC][0] = 46,
+ [2][1][RTW89_ACMA][0] = 36,
+ [2][1][RTW89_CHILE][0] = 32,
+ [2][1][RTW89_UKRAINE][0] = 36,
+ [2][1][RTW89_MEXICO][0] = 52,
+ [2][1][RTW89_CN][0] = 36,
+ [2][1][RTW89_QATAR][0] = 36,
+ [2][1][RTW89_UK][0] = 36,
+ [2][1][RTW89_FCC][2] = 50,
+ [2][1][RTW89_ETSI][2] = 36,
+ [2][1][RTW89_MKK][2] = 36,
+ [2][1][RTW89_IC][2] = 18,
+ [2][1][RTW89_KCC][2] = 46,
+ [2][1][RTW89_ACMA][2] = 36,
+ [2][1][RTW89_CHILE][2] = 32,
+ [2][1][RTW89_UKRAINE][2] = 36,
+ [2][1][RTW89_MEXICO][2] = 52,
+ [2][1][RTW89_CN][2] = 36,
+ [2][1][RTW89_QATAR][2] = 36,
+ [2][1][RTW89_UK][2] = 36,
+ [2][1][RTW89_FCC][4] = 50,
+ [2][1][RTW89_ETSI][4] = 36,
+ [2][1][RTW89_MKK][4] = 36,
+ [2][1][RTW89_IC][4] = 22,
+ [2][1][RTW89_KCC][4] = 46,
+ [2][1][RTW89_ACMA][4] = 36,
+ [2][1][RTW89_CHILE][4] = 30,
+ [2][1][RTW89_UKRAINE][4] = 36,
+ [2][1][RTW89_MEXICO][4] = 52,
+ [2][1][RTW89_CN][4] = 36,
+ [2][1][RTW89_QATAR][4] = 36,
+ [2][1][RTW89_UK][4] = 36,
+ [2][1][RTW89_FCC][6] = 50,
+ [2][1][RTW89_ETSI][6] = 36,
+ [2][1][RTW89_MKK][6] = 36,
+ [2][1][RTW89_IC][6] = 22,
+ [2][1][RTW89_KCC][6] = 22,
+ [2][1][RTW89_ACMA][6] = 36,
+ [2][1][RTW89_CHILE][6] = 30,
+ [2][1][RTW89_UKRAINE][6] = 36,
+ [2][1][RTW89_MEXICO][6] = 52,
+ [2][1][RTW89_CN][6] = 36,
+ [2][1][RTW89_QATAR][6] = 36,
+ [2][1][RTW89_UK][6] = 36,
+ [2][1][RTW89_FCC][8] = 50,
+ [2][1][RTW89_ETSI][8] = 36,
+ [2][1][RTW89_MKK][8] = 34,
+ [2][1][RTW89_IC][8] = 50,
+ [2][1][RTW89_KCC][8] = 48,
+ [2][1][RTW89_ACMA][8] = 36,
+ [2][1][RTW89_CHILE][8] = 54,
+ [2][1][RTW89_UKRAINE][8] = 36,
+ [2][1][RTW89_MEXICO][8] = 50,
+ [2][1][RTW89_CN][8] = 36,
+ [2][1][RTW89_QATAR][8] = 36,
+ [2][1][RTW89_UK][8] = 36,
+ [2][1][RTW89_FCC][10] = 50,
+ [2][1][RTW89_ETSI][10] = 36,
+ [2][1][RTW89_MKK][10] = 34,
+ [2][1][RTW89_IC][10] = 50,
+ [2][1][RTW89_KCC][10] = 48,
+ [2][1][RTW89_ACMA][10] = 36,
+ [2][1][RTW89_CHILE][10] = 54,
+ [2][1][RTW89_UKRAINE][10] = 36,
+ [2][1][RTW89_MEXICO][10] = 50,
+ [2][1][RTW89_CN][10] = 36,
+ [2][1][RTW89_QATAR][10] = 36,
+ [2][1][RTW89_UK][10] = 36,
+ [2][1][RTW89_FCC][12] = 52,
+ [2][1][RTW89_ETSI][12] = 36,
+ [2][1][RTW89_MKK][12] = 36,
+ [2][1][RTW89_IC][12] = 52,
+ [2][1][RTW89_KCC][12] = 48,
+ [2][1][RTW89_ACMA][12] = 36,
+ [2][1][RTW89_CHILE][12] = 54,
+ [2][1][RTW89_UKRAINE][12] = 36,
+ [2][1][RTW89_MEXICO][12] = 52,
+ [2][1][RTW89_CN][12] = 36,
+ [2][1][RTW89_QATAR][12] = 36,
+ [2][1][RTW89_UK][12] = 36,
+ [2][1][RTW89_FCC][14] = 52,
+ [2][1][RTW89_ETSI][14] = 36,
+ [2][1][RTW89_MKK][14] = 36,
+ [2][1][RTW89_IC][14] = 52,
+ [2][1][RTW89_KCC][14] = 48,
+ [2][1][RTW89_ACMA][14] = 36,
+ [2][1][RTW89_CHILE][14] = 54,
+ [2][1][RTW89_UKRAINE][14] = 36,
+ [2][1][RTW89_MEXICO][14] = 52,
+ [2][1][RTW89_CN][14] = 36,
+ [2][1][RTW89_QATAR][14] = 36,
+ [2][1][RTW89_UK][14] = 36,
+ [2][1][RTW89_FCC][15] = 50,
+ [2][1][RTW89_ETSI][15] = 36,
+ [2][1][RTW89_MKK][15] = 54,
+ [2][1][RTW89_IC][15] = 50,
+ [2][1][RTW89_KCC][15] = 48,
+ [2][1][RTW89_ACMA][15] = 36,
+ [2][1][RTW89_CHILE][15] = 56,
+ [2][1][RTW89_UKRAINE][15] = 36,
+ [2][1][RTW89_MEXICO][15] = 50,
+ [2][1][RTW89_CN][15] = 127,
+ [2][1][RTW89_QATAR][15] = 36,
+ [2][1][RTW89_UK][15] = 36,
+ [2][1][RTW89_FCC][17] = 50,
+ [2][1][RTW89_ETSI][17] = 36,
+ [2][1][RTW89_MKK][17] = 56,
+ [2][1][RTW89_IC][17] = 50,
+ [2][1][RTW89_KCC][17] = 48,
+ [2][1][RTW89_ACMA][17] = 36,
+ [2][1][RTW89_CHILE][17] = 56,
+ [2][1][RTW89_UKRAINE][17] = 36,
+ [2][1][RTW89_MEXICO][17] = 50,
+ [2][1][RTW89_CN][17] = 127,
+ [2][1][RTW89_QATAR][17] = 36,
+ [2][1][RTW89_UK][17] = 36,
+ [2][1][RTW89_FCC][19] = 50,
+ [2][1][RTW89_ETSI][19] = 36,
+ [2][1][RTW89_MKK][19] = 56,
+ [2][1][RTW89_IC][19] = 50,
+ [2][1][RTW89_KCC][19] = 48,
+ [2][1][RTW89_ACMA][19] = 36,
+ [2][1][RTW89_CHILE][19] = 56,
+ [2][1][RTW89_UKRAINE][19] = 36,
+ [2][1][RTW89_MEXICO][19] = 50,
+ [2][1][RTW89_CN][19] = 127,
+ [2][1][RTW89_QATAR][19] = 36,
+ [2][1][RTW89_UK][19] = 36,
+ [2][1][RTW89_FCC][21] = 50,
+ [2][1][RTW89_ETSI][21] = 36,
+ [2][1][RTW89_MKK][21] = 56,
+ [2][1][RTW89_IC][21] = 50,
+ [2][1][RTW89_KCC][21] = 48,
+ [2][1][RTW89_ACMA][21] = 36,
+ [2][1][RTW89_CHILE][21] = 58,
+ [2][1][RTW89_UKRAINE][21] = 36,
+ [2][1][RTW89_MEXICO][21] = 50,
+ [2][1][RTW89_CN][21] = 127,
+ [2][1][RTW89_QATAR][21] = 36,
+ [2][1][RTW89_UK][21] = 36,
+ [2][1][RTW89_FCC][23] = 50,
+ [2][1][RTW89_ETSI][23] = 36,
+ [2][1][RTW89_MKK][23] = 56,
+ [2][1][RTW89_IC][23] = 50,
+ [2][1][RTW89_KCC][23] = 48,
+ [2][1][RTW89_ACMA][23] = 36,
+ [2][1][RTW89_CHILE][23] = 58,
+ [2][1][RTW89_UKRAINE][23] = 36,
+ [2][1][RTW89_MEXICO][23] = 50,
+ [2][1][RTW89_CN][23] = 127,
+ [2][1][RTW89_QATAR][23] = 36,
+ [2][1][RTW89_UK][23] = 36,
+ [2][1][RTW89_FCC][25] = 50,
+ [2][1][RTW89_ETSI][25] = 36,
+ [2][1][RTW89_MKK][25] = 56,
+ [2][1][RTW89_IC][25] = 127,
+ [2][1][RTW89_KCC][25] = 48,
+ [2][1][RTW89_ACMA][25] = 127,
+ [2][1][RTW89_CHILE][25] = 58,
+ [2][1][RTW89_UKRAINE][25] = 36,
+ [2][1][RTW89_MEXICO][25] = 50,
+ [2][1][RTW89_CN][25] = 127,
+ [2][1][RTW89_QATAR][25] = 36,
+ [2][1][RTW89_UK][25] = 36,
+ [2][1][RTW89_FCC][27] = 50,
+ [2][1][RTW89_ETSI][27] = 36,
+ [2][1][RTW89_MKK][27] = 56,
+ [2][1][RTW89_IC][27] = 127,
+ [2][1][RTW89_KCC][27] = 48,
+ [2][1][RTW89_ACMA][27] = 127,
+ [2][1][RTW89_CHILE][27] = 58,
+ [2][1][RTW89_UKRAINE][27] = 36,
+ [2][1][RTW89_MEXICO][27] = 50,
+ [2][1][RTW89_CN][27] = 127,
+ [2][1][RTW89_QATAR][27] = 36,
+ [2][1][RTW89_UK][27] = 36,
+ [2][1][RTW89_FCC][29] = 50,
+ [2][1][RTW89_ETSI][29] = 36,
+ [2][1][RTW89_MKK][29] = 56,
+ [2][1][RTW89_IC][29] = 127,
+ [2][1][RTW89_KCC][29] = 48,
+ [2][1][RTW89_ACMA][29] = 127,
+ [2][1][RTW89_CHILE][29] = 56,
+ [2][1][RTW89_UKRAINE][29] = 36,
+ [2][1][RTW89_MEXICO][29] = 50,
+ [2][1][RTW89_CN][29] = 127,
+ [2][1][RTW89_QATAR][29] = 36,
+ [2][1][RTW89_UK][29] = 36,
+ [2][1][RTW89_FCC][31] = 50,
+ [2][1][RTW89_ETSI][31] = 36,
+ [2][1][RTW89_MKK][31] = 56,
+ [2][1][RTW89_IC][31] = 50,
+ [2][1][RTW89_KCC][31] = 48,
+ [2][1][RTW89_ACMA][31] = 36,
+ [2][1][RTW89_CHILE][31] = 56,
+ [2][1][RTW89_UKRAINE][31] = 36,
+ [2][1][RTW89_MEXICO][31] = 50,
+ [2][1][RTW89_CN][31] = 127,
+ [2][1][RTW89_QATAR][31] = 36,
+ [2][1][RTW89_UK][31] = 36,
+ [2][1][RTW89_FCC][33] = 50,
+ [2][1][RTW89_ETSI][33] = 36,
+ [2][1][RTW89_MKK][33] = 56,
+ [2][1][RTW89_IC][33] = 50,
+ [2][1][RTW89_KCC][33] = 48,
+ [2][1][RTW89_ACMA][33] = 36,
+ [2][1][RTW89_CHILE][33] = 56,
+ [2][1][RTW89_UKRAINE][33] = 36,
+ [2][1][RTW89_MEXICO][33] = 50,
+ [2][1][RTW89_CN][33] = 127,
+ [2][1][RTW89_QATAR][33] = 36,
+ [2][1][RTW89_UK][33] = 36,
+ [2][1][RTW89_FCC][35] = 50,
+ [2][1][RTW89_ETSI][35] = 36,
+ [2][1][RTW89_MKK][35] = 56,
+ [2][1][RTW89_IC][35] = 50,
+ [2][1][RTW89_KCC][35] = 48,
+ [2][1][RTW89_ACMA][35] = 36,
+ [2][1][RTW89_CHILE][35] = 56,
+ [2][1][RTW89_UKRAINE][35] = 36,
+ [2][1][RTW89_MEXICO][35] = 50,
+ [2][1][RTW89_CN][35] = 127,
+ [2][1][RTW89_QATAR][35] = 36,
+ [2][1][RTW89_UK][35] = 36,
+ [2][1][RTW89_FCC][37] = 50,
+ [2][1][RTW89_ETSI][37] = 127,
+ [2][1][RTW89_MKK][37] = 54,
+ [2][1][RTW89_IC][37] = 50,
+ [2][1][RTW89_KCC][37] = 48,
+ [2][1][RTW89_ACMA][37] = 60,
+ [2][1][RTW89_CHILE][37] = 56,
+ [2][1][RTW89_UKRAINE][37] = 127,
+ [2][1][RTW89_MEXICO][37] = 50,
+ [2][1][RTW89_CN][37] = 127,
+ [2][1][RTW89_QATAR][37] = 127,
+ [2][1][RTW89_UK][37] = 66,
+ [2][1][RTW89_FCC][38] = 84,
+ [2][1][RTW89_ETSI][38] = 16,
+ [2][1][RTW89_MKK][38] = 127,
+ [2][1][RTW89_IC][38] = 84,
+ [2][1][RTW89_KCC][38] = 48,
+ [2][1][RTW89_ACMA][38] = 84,
+ [2][1][RTW89_CHILE][38] = 58,
+ [2][1][RTW89_UKRAINE][38] = 16,
+ [2][1][RTW89_MEXICO][38] = 84,
+ [2][1][RTW89_CN][38] = 64,
+ [2][1][RTW89_QATAR][38] = 16,
+ [2][1][RTW89_UK][38] = 38,
+ [2][1][RTW89_FCC][40] = 84,
+ [2][1][RTW89_ETSI][40] = 16,
+ [2][1][RTW89_MKK][40] = 127,
+ [2][1][RTW89_IC][40] = 84,
+ [2][1][RTW89_KCC][40] = 48,
+ [2][1][RTW89_ACMA][40] = 84,
+ [2][1][RTW89_CHILE][40] = 58,
+ [2][1][RTW89_UKRAINE][40] = 16,
+ [2][1][RTW89_MEXICO][40] = 84,
+ [2][1][RTW89_CN][40] = 64,
+ [2][1][RTW89_QATAR][40] = 16,
+ [2][1][RTW89_UK][40] = 38,
+ [2][1][RTW89_FCC][42] = 84,
+ [2][1][RTW89_ETSI][42] = 16,
+ [2][1][RTW89_MKK][42] = 127,
+ [2][1][RTW89_IC][42] = 84,
+ [2][1][RTW89_KCC][42] = 48,
+ [2][1][RTW89_ACMA][42] = 84,
+ [2][1][RTW89_CHILE][42] = 58,
+ [2][1][RTW89_UKRAINE][42] = 16,
+ [2][1][RTW89_MEXICO][42] = 84,
+ [2][1][RTW89_CN][42] = 64,
+ [2][1][RTW89_QATAR][42] = 16,
+ [2][1][RTW89_UK][42] = 38,
+ [2][1][RTW89_FCC][44] = 84,
+ [2][1][RTW89_ETSI][44] = 16,
+ [2][1][RTW89_MKK][44] = 127,
+ [2][1][RTW89_IC][44] = 84,
+ [2][1][RTW89_KCC][44] = 48,
+ [2][1][RTW89_ACMA][44] = 84,
+ [2][1][RTW89_CHILE][44] = 58,
+ [2][1][RTW89_UKRAINE][44] = 16,
+ [2][1][RTW89_MEXICO][44] = 84,
+ [2][1][RTW89_CN][44] = 64,
+ [2][1][RTW89_QATAR][44] = 16,
+ [2][1][RTW89_UK][44] = 38,
+ [2][1][RTW89_FCC][46] = 84,
+ [2][1][RTW89_ETSI][46] = 16,
+ [2][1][RTW89_MKK][46] = 127,
+ [2][1][RTW89_IC][46] = 84,
+ [2][1][RTW89_KCC][46] = 48,
+ [2][1][RTW89_ACMA][46] = 84,
+ [2][1][RTW89_CHILE][46] = 58,
+ [2][1][RTW89_UKRAINE][46] = 16,
+ [2][1][RTW89_MEXICO][46] = 84,
+ [2][1][RTW89_CN][46] = 64,
+ [2][1][RTW89_QATAR][46] = 16,
+ [2][1][RTW89_UK][46] = 38,
+ [2][1][RTW89_FCC][48] = 44,
+ [2][1][RTW89_ETSI][48] = 127,
+ [2][1][RTW89_MKK][48] = 127,
+ [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_KCC][48] = 127,
+ [2][1][RTW89_ACMA][48] = 127,
+ [2][1][RTW89_CHILE][48] = 127,
+ [2][1][RTW89_UKRAINE][48] = 127,
+ [2][1][RTW89_MEXICO][48] = 127,
+ [2][1][RTW89_CN][48] = 127,
+ [2][1][RTW89_QATAR][48] = 127,
+ [2][1][RTW89_UK][48] = 127,
+ [2][1][RTW89_FCC][50] = 44,
+ [2][1][RTW89_ETSI][50] = 127,
+ [2][1][RTW89_MKK][50] = 127,
+ [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_KCC][50] = 127,
+ [2][1][RTW89_ACMA][50] = 127,
+ [2][1][RTW89_CHILE][50] = 127,
+ [2][1][RTW89_UKRAINE][50] = 127,
+ [2][1][RTW89_MEXICO][50] = 127,
+ [2][1][RTW89_CN][50] = 127,
+ [2][1][RTW89_QATAR][50] = 127,
+ [2][1][RTW89_UK][50] = 127,
+ [2][1][RTW89_FCC][52] = 44,
+ [2][1][RTW89_ETSI][52] = 127,
+ [2][1][RTW89_MKK][52] = 127,
+ [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_KCC][52] = 127,
+ [2][1][RTW89_ACMA][52] = 127,
+ [2][1][RTW89_CHILE][52] = 127,
+ [2][1][RTW89_UKRAINE][52] = 127,
+ [2][1][RTW89_MEXICO][52] = 127,
+ [2][1][RTW89_CN][52] = 127,
+ [2][1][RTW89_QATAR][52] = 127,
+ [2][1][RTW89_UK][52] = 127,
+};
+
+const struct rtw89_phy_table rtw89_8852b_phy_bb_table = {
+ .regs = rtw89_8852b_phy_bb_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852b_phy_bb_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_phy_table rtw89_8852b_phy_bb_gain_table = {
+ .regs = rtw89_8852b_phy_bb_reg_gain,
+ .n_regs = ARRAY_SIZE(rtw89_8852b_phy_bb_reg_gain),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_phy_table rtw89_8852b_phy_radioa_table = {
+ .regs = rtw89_8852b_phy_radioa_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852b_phy_radioa_regs),
+ .rf_path = RF_PATH_A,
+ .config = rtw89_phy_config_rf_reg_v1,
+};
+
+const struct rtw89_phy_table rtw89_8852b_phy_radiob_table = {
+ .regs = rtw89_8852b_phy_radiob_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852b_phy_radiob_regs),
+ .rf_path = RF_PATH_B,
+ .config = rtw89_phy_config_rf_reg_v1,
+};
+
+const struct rtw89_phy_table rtw89_8852b_phy_nctl_table = {
+ .regs = rtw89_8852b_phy_nctl_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852b_phy_nctl_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_txpwr_table rtw89_8852b_byr_table = {
+ .data = rtw89_8852b_txpwr_byrate,
+ .size = ARRAY_SIZE(rtw89_8852b_txpwr_byrate),
+ .load = rtw89_phy_load_txpwr_byrate,
+};
+
+const struct rtw89_txpwr_track_cfg rtw89_8852b_trk_cfg = {
+ .delta_swingidx_5gb_n = _txpwr_track_delta_swingidx_5gb_n,
+ .delta_swingidx_5gb_p = _txpwr_track_delta_swingidx_5gb_p,
+ .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n,
+ .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p,
+ .delta_swingidx_2gb_n = _txpwr_track_delta_swingidx_2gb_n,
+ .delta_swingidx_2gb_p = _txpwr_track_delta_swingidx_2gb_p,
+ .delta_swingidx_2ga_n = _txpwr_track_delta_swingidx_2ga_n,
+ .delta_swingidx_2ga_p = _txpwr_track_delta_swingidx_2ga_p,
+ .delta_swingidx_2g_cck_b_n = _txpwr_track_delta_swingidx_2g_cck_b_n,
+ .delta_swingidx_2g_cck_b_p = _txpwr_track_delta_swingidx_2g_cck_b_p,
+ .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n,
+ .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p,
+};
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h
new file mode 100644
index 000000000000..114337ac9fb0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852B_TABLE_H__
+#define __RTW89_8852B_TABLE_H__
+
+#include "core.h"
+
+extern const struct rtw89_phy_table rtw89_8852b_phy_bb_table;
+extern const struct rtw89_phy_table rtw89_8852b_phy_bb_gain_table;
+extern const struct rtw89_phy_table rtw89_8852b_phy_radioa_table;
+extern const struct rtw89_phy_table rtw89_8852b_phy_radiob_table;
+extern const struct rtw89_phy_table rtw89_8852b_phy_nctl_table;
+extern const struct rtw89_txpwr_table rtw89_8852b_byr_table;
+extern const struct rtw89_txpwr_track_cfg rtw89_8852b_trk_cfg;
+extern const u8 rtw89_8852b_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
+ [RTW89_REGD_NUM];
+extern const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+extern const s8 rtw89_8852b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 7bf95c38d3eb..0ef2ca8efeb0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -7,18 +7,82 @@
#include "pci.h"
#include "reg.h"
+#include "rtw8852b.h"
static const struct rtw89_pci_info rtw8852b_pci_info = {
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_2048B,
+ .rx_burst = MAC_AX_RX_BURST_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+
+ .init_cfg_reg = R_AX_PCIE_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN,
+ .rxhci_en_bit = B_AX_RXHCI_EN,
+ .rxbd_mode_bit = B_AX_RXBD_MODE,
+ .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
+ .txbd_rwptr_clr2_reg = 0,
.dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
.dma_stop2 = {0},
.dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
.dma_busy2_reg = 0,
.dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+ .rpwm_addr = R_AX_PCIE_HRPWM,
+ .cpwm_addr = R_AX_CPWM,
.tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+ .bd_idx_addr_low_power = NULL,
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+
+ .ltr_set = rtw89_pci_ltr_set,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .config_intr_mask = rtw89_pci_config_intr_mask,
+ .enable_intr = rtw89_pci_enable_intr,
+ .disable_intr = rtw89_pci_disable_intr,
+ .recognize_intrs = rtw89_pci_recognize_intrs,
+};
+
+static const struct rtw89_driver_info rtw89_8852be_info = {
+ .chip = &rtw8852b_chip_info,
+ .bus = {
+ .pci = &rtw8852b_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852be_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852be_info,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb85b),
+ .driver_data = (kernel_ulong_t)&rtw89_8852be_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852be_id_table);
+
+static struct pci_driver rtw89_8852be_driver = {
+ .name = "rtw89_8852be",
+ .id_table = rtw89_8852be_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
};
+module_pci_driver(rtw89_8852be_driver);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BE driver");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 67653b3e1a35..a87482cc25f5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -273,6 +273,9 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN |
B_AX_TMAC_EN | B_AX_RMAC_EN);
+ rtw89_write32_mask(rtwdev, R_AX_LED1_FUNC_SEL, B_AX_PINMUX_EESK_FUNC_SEL_V1_MASK,
+ PINMUX_EESK_FUNC_SEL_BT_LOG);
+
return 0;
}
@@ -785,40 +788,12 @@ static const struct rtw8852c_bb_gain_op1db bb_gain_op1db_a = {
.mask_tia0_lna6 = 0xff000000,
};
-static enum rtw89_phy_bb_gain_band
-rtw8852c_mapping_gain_band(enum rtw89_subband subband)
-{
- switch (subband) {
- default:
- case RTW89_CH_2G:
- return RTW89_BB_GAIN_BAND_2G;
- case RTW89_CH_5G_BAND_1:
- return RTW89_BB_GAIN_BAND_5G_L;
- case RTW89_CH_5G_BAND_3:
- return RTW89_BB_GAIN_BAND_5G_M;
- case RTW89_CH_5G_BAND_4:
- return RTW89_BB_GAIN_BAND_5G_H;
- case RTW89_CH_6G_BAND_IDX0:
- case RTW89_CH_6G_BAND_IDX1:
- return RTW89_BB_GAIN_BAND_6G_L;
- case RTW89_CH_6G_BAND_IDX2:
- case RTW89_CH_6G_BAND_IDX3:
- return RTW89_BB_GAIN_BAND_6G_M;
- case RTW89_CH_6G_BAND_IDX4:
- case RTW89_CH_6G_BAND_IDX5:
- return RTW89_BB_GAIN_BAND_6G_H;
- case RTW89_CH_6G_BAND_IDX6:
- case RTW89_CH_6G_BAND_IDX7:
- return RTW89_BB_GAIN_BAND_6G_UH;
- }
-}
-
static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
- u8 gain_band = rtw8852c_mapping_gain_band(subband);
+ u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
u32 mask;
@@ -976,21 +951,7 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_RPL_OFST, B_RPL_OFST_MASK, tmp & 0x7f);
}
- switch (chan->subband_type) {
- default:
- case RTW89_CH_2G:
- gain_band = RTW89_GAIN_OFFSET_2G_OFDM;
- break;
- case RTW89_CH_5G_BAND_1:
- gain_band = RTW89_GAIN_OFFSET_5G_LOW;
- break;
- case RTW89_CH_5G_BAND_3:
- gain_band = RTW89_GAIN_OFFSET_5G_MID;
- break;
- case RTW89_CH_5G_BAND_4:
- gain_band = RTW89_GAIN_OFFSET_5G_HIGH;
- break;
- }
+ gain_band = rtw89_subband_to_gain_offset_band_of_ofdm(chan->subband_type);
offset_q0 = -efuse_gain->offset[path][gain_band];
offset_base_q4 = efuse_gain->offset_base[phy_idx];
@@ -1722,12 +1683,12 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ static const u32 ru_alloc_msk[2] = {B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0,
+ B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1};
struct rtw89_hal *hal = &rtwdev->hal;
bool cck_en = chan->band_type == RTW89_BAND_2G;
u8 pri_ch_idx = chan->pri_ch_idx;
u32 mask, reg;
- u32 ru_alloc_msk[2] = {B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0,
- B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1};
u8 ntx_path;
if (chan->band_type == RTW89_BAND_2G)
@@ -1871,11 +1832,11 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
{
- struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
rtwdev->is_tssi_mode[RF_PATH_A] = false;
rtwdev->is_tssi_mode[RF_PATH_B] = false;
- memset(mcc_info, 0, sizeof(*mcc_info));
+ memset(rfk_mcc, 0, sizeof(*rfk_mcc));
rtw8852c_lck_init(rtwdev);
rtw8852c_rck(rtwdev);
@@ -2006,75 +1967,6 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
phy_idx);
}
-static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 band = chan->band_type;
- u8 ch = chan->channel;
- static const u8 rs[] = {
- RTW89_RS_CCK,
- RTW89_RS_OFDM,
- RTW89_RS_MCS,
- RTW89_RS_HEDCM,
- };
- s8 tmp;
- u8 i, j;
- u32 val, shf, addr = R_AX_PWR_BY_RATE;
- struct rtw89_rate_desc cur;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] set txpwr byrate with ch=%d\n", ch);
-
- for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
- for (i = 0; i < ARRAY_SIZE(rs); i++) {
- if (cur.nss >= rtw89_rs_nss_max[rs[i]])
- continue;
-
- val = 0;
- cur.rs = rs[i];
-
- for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
- cur.idx = j;
- shf = (j % 4) * 8;
- tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
- &cur);
- val |= (tmp << shf);
-
- if ((j + 1) % 4)
- continue;
-
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
- val = 0;
- addr += 4;
- }
- }
- }
-}
-
-static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 band = chan->band_type;
- struct rtw89_rate_desc desc = {
- .nss = RTW89_NSS_1,
- .rs = RTW89_RS_OFFSET,
- };
- u32 val = 0;
- s8 v;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
-
- for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
- v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
- val |= ((v & 0xf) << (4 * desc.idx));
- }
-
- rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
- GENMASK(19, 0), val);
-}
-
static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
u8 tx_shape_idx,
enum rtw89_phy_idx phy_idx)
@@ -2147,83 +2039,15 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
tx_shape_ofdm);
}
-static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
-#define __MAC_TXPWR_LMT_PAGE_SIZE 40
- u8 ch = chan->channel;
- u8 bw = chan->band_width;
- struct rtw89_txpwr_limit lmt[NTX_NUM_8852C];
- u32 addr, val;
- const s8 *ptr;
- u8 i, j;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
-
- for (i = 0; i < NTX_NUM_8852C; i++) {
- rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
-
- for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
- addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
- ptr = (s8 *)&lmt[i] + j;
-
- val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
- FIELD_PREP(GENMASK(15, 8), ptr[1]) |
- FIELD_PREP(GENMASK(23, 16), ptr[2]) |
- FIELD_PREP(GENMASK(31, 24), ptr[3]);
-
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
- }
- }
-#undef __MAC_TXPWR_LMT_PAGE_SIZE
-}
-
-static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
-#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
- u8 ch = chan->channel;
- u8 bw = chan->band_width;
- struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852C];
- u32 addr, val;
- const s8 *ptr;
- u8 i, j;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
-
- for (i = 0; i < NTX_NUM_8852C; i++) {
- rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
-
- for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
- addr = R_AX_PWR_RU_LMT + j +
- __MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
- ptr = (s8 *)&lmt_ru[i] + j;
-
- val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
- FIELD_PREP(GENMASK(15, 8), ptr[1]) |
- FIELD_PREP(GENMASK(23, 16), ptr[2]) |
- FIELD_PREP(GENMASK(31, 24), ptr[3]);
-
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
- }
- }
-
-#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
-}
-
static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_byrate(rtwdev, chan, phy_idx);
- rtw8852c_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
rtw8852c_set_tx_shape(rtwdev, chan, phy_idx);
- rtw8852c_set_txpwr_limit(rtwdev, chan, phy_idx);
- rtw8852c_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -2819,19 +2643,6 @@ static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852c_mon_reg[] = {
};
static
-void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- struct rtw89_btc_bt_link_info *b = &bt->link_info;
-
- /* fix LNA2 = level-5 for BT ACI issue at BTG */
- if (btc->dm.wl_btg_rx && b->profile_cnt.now != 0)
- dm->trx_para_level = 1;
-}
-
-static
void rtw8852c_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
{
/* Feature moved to firmware */
@@ -2985,6 +2796,15 @@ static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
return 0;
}
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8852c = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = RTW89_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW89_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+};
+#endif
+
static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.enable_bb_rf = rtw8852c_mac_enable_bb_rf,
.disable_bb_rf = rtw8852c_mac_disable_bb_rf,
@@ -3027,7 +2847,6 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.btc_set_wl_pri = rtw8852c_btc_set_wl_pri,
.btc_set_wl_txpwr_ctrl = rtw8852c_btc_set_wl_txpwr_ctrl,
.btc_get_bt_rssi = rtw8852c_btc_get_bt_rssi,
- .btc_bt_aci_imp = rtw8852c_btc_bt_aci_imp,
.btc_update_bt_cnt = rtw8852c_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852c_btc_wl_s1_standby,
.btc_set_wl_rx_gain = rtw8852c_btc_set_wl_rx_gain,
@@ -3045,6 +2864,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = rtw8852c_hfc_param_ini_pcie,
.dle_mem = rtw8852c_dle_mem_pcie,
+ .wde_qempty_acq_num = 16,
+ .wde_qempty_mgq_sel = 16,
.rf_base_addr = {0xe000, 0xf000},
.pwr_on_seq = NULL,
.pwr_off_seq = NULL,
@@ -3070,6 +2891,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
.support_bw160 = true,
+ .support_ul_tb_ctrl = false,
.hw_sec_hdr = true,
.rf_path_num = 2,
.tx_nss = 2,
@@ -3132,11 +2954,15 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1,
.c2h_regs = rtw8852c_c2h_regs,
.page_regs = &rtw8852c_page_regs,
+ .cfo_src_fd = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 5,
.imr_info = &rtw8852c_imr_info,
.rrsr_cfgs = &rtw8852c_rrsr_cfgs,
.dma_ch_mask = 0,
+#ifdef CONFIG_PM
+ .wowlan_stub = &rtw_wowlan_stub_8852c,
+#endif
};
EXPORT_SYMBOL(rtw8852c_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
index 558dd0f048f2..ac642808a81f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
@@ -9,7 +9,6 @@
#define RF_PATH_NUM_8852C 2
#define BB_PATH_NUM_8852C 2
-#define NTX_NUM_8852C 2
struct rtw8852c_u_efuse {
u8 rsvd[0x38];
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 006c2cf93111..60cd676fe22c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -22,8 +22,7 @@ static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};
static const u32 rtw8852c_backup_bb_regs[] = {
- 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x823c, 0x8224, 0x8220,
- 0xc1d4, 0xc1d8, 0xc1e8
+ 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x8220, 0xc1d4, 0xc1d8, 0xc1e8
};
static const u32 rtw8852c_backup_rf_regs[] = {
@@ -1031,9 +1030,9 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
- struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- u8 idx = mcc_info->table_idx;
+ u8 idx = rfk_mcc->table_idx;
bool is_fail1, is_fail2;
u32 val;
u32 core_i;
@@ -1376,10 +1375,10 @@ static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
- struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
u8 idx = 0;
- idx = mcc_info->table_idx;
+ idx = rfk_mcc->table_idx;
rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
@@ -1667,7 +1666,7 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
- mdelay(10);
+ udelay(10);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -3825,20 +3824,20 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
- u8 idx = mcc_info->table_idx;
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ u8 idx = rfk_mcc->table_idx;
int i;
for (i = 0; i < RTW89_IQK_CHS_NR; i++) {
- if (mcc_info->ch[idx] == 0)
+ if (rfk_mcc->ch[idx] == 0)
break;
if (++idx >= RTW89_IQK_CHS_NR)
idx = 0;
}
- mcc_info->table_idx = idx;
- mcc_info->ch[idx] = chan->channel;
- mcc_info->band[idx] = chan->band_type;
+ rfk_mcc->table_idx = idx;
+ rfk_mcc->ch[idx] = chan->channel;
+ rfk_mcc->band[idx] = chan->band_type;
}
void rtw8852c_rck(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
index 11f35e7a7f0e..96c264a057ff 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -10,6 +10,8 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0xF0FF0000, 0x00000000},
{0xF03300FF, 0x00000001},
{0xF03400FF, 0x00000002},
+ {0xF03500FF, 0x00000003},
+ {0xF03600FF, 0x00000004},
{0x70C, 0x00000020},
{0x704, 0x601E0100},
{0x4000, 0x00000000},
@@ -200,7 +202,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4264, 0x00000000},
{0x4268, 0x00000000},
{0x426C, 0x0418317C},
- {0x46C0, 0x00000001},
+ {0x46C0, 0x00000000},
{0x4270, 0x00D6135C},
{0x46C4, 0x00000033},
{0x4274, 0x00000000},
@@ -342,7 +344,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x442C, 0x00000000},
{0x4430, 0x00000000},
{0x4434, 0x00000000},
- {0x4438, 0x590642D0},
+ {0x4438, 0x59096398},
{0x443C, 0x398668A0},
{0x4440, 0x6C100808},
{0x4444, 0x4A145344},
@@ -566,9 +568,9 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4BA8, 0x002B6456},
{0x45E0, 0x00000000},
{0x45E4, 0x00000000},
- {0x45E8, 0x00E2E1E1},
+ {0x45E8, 0x00C8E1E1},
{0x45EC, 0xCBCBB6B6},
- {0x45F0, 0x59100FCA},
+ {0x45F0, 0x5F900FCA},
{0x4BAC, 0x12CAB6DE},
{0x4BB0, 0x00001110},
{0x45F4, 0x08882550},
@@ -584,9 +586,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4660, 0x41250EF4},
{0x4664, 0x6750E458},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x45DC, 0xE1CB38E8},
- {0x4660, 0x4A2E1800},
- {0x4664, 0x6750E462},
+ {0x45DC, 0xD1B942F4},
+ {0x4660, 0x41250EF4},
+ {0x4664, 0x6750E458},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xD1B942F4},
+ {0x4660, 0x41250EF4},
+ {0x4664, 0x6750E458},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xD1B942F4},
+ {0x4660, 0x41250EF4},
+ {0x4664, 0x6750E458},
{0xA0000000, 0x00000000},
{0x45DC, 0xE1CB38E8},
{0x4660, 0x4A2E1800},
@@ -603,7 +613,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4688, 0x1A10FF04},
{0x468C, 0x282A3000},
{0x4690, 0x2A29292A},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4694, 0x04FA2A2A},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4694, 0x04FA2A2A},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4694, 0x06FA2A2A},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4694, 0x04FA2A2A},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
{0x4694, 0x04FA2A2A},
+ {0xA0000000, 0x00000000},
+ {0x4694, 0x04FA2A2A},
+ {0xB0000000, 0x00000000},
{0x4698, 0xEE0F04D1},
{0x469C, 0x89291436},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
@@ -612,6 +634,10 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x46A0, 0x0701E79E},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
{0x46A0, 0x0701E79E},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
{0xA0000000, 0x00000000},
{0x46A0, 0x0701E79E},
{0xB0000000, 0x00000000},
@@ -620,11 +646,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x46A8, 0x2212FF14},
{0x46AC, 0x60423537},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x649EFF14},
+ {0x46AC, 0xA1B37C4E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
{0x46A8, 0x4D1E7F14},
{0x46AC, 0x60B37C4E},
- {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x46A8, 0x2212FF14},
- {0x46AC, 0x60423537},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x649EFF14},
+ {0x46AC, 0xA1B37C4E},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x649EFF14},
+ {0x46AC, 0xA1B37C4E},
{0xA0000000, 0x00000000},
{0x46A8, 0x2212FF14},
{0x46AC, 0x60423537},
@@ -637,11 +669,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4720, 0x3FFFFD63},
{0x4724, 0xB58D11FF},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x46BC, 0x5107C252},
- {0x4720, 0x27795843},
+ {0x46BC, 0x510FC252},
+ {0x4720, 0x27795303},
{0x4724, 0xB58D11F5},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x46BC, 0x5107C252},
+ {0x46BC, 0x510FC252},
+ {0x4720, 0x27795843},
+ {0x4724, 0xB58D11F5},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x510FC252},
+ {0x4720, 0x27795303},
+ {0x4724, 0xB58D11F5},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x510FC252},
{0x4720, 0x27795303},
{0x4724, 0xB58D11F5},
{0xA0000000, 0x00000000},
@@ -656,11 +696,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4734, 0x00000020},
{0x4738, 0x8325C500},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4734, 0x003D4C20},
+ {0x4734, 0x003D5420},
{0x4738, 0x8F25C500},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x003D4C20},
+ {0x4738, 0x8F25C500},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
{0x4734, 0x003D5420},
- {0x4738, 0x8725C500},
+ {0x4738, 0x8F25C500},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x003D5420},
+ {0x4738, 0x8F25C500},
{0xA0000000, 0x00000000},
{0x4734, 0x00000020},
{0x4738, 0x8325C500},
@@ -678,8 +724,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4BB4, 0x05EBC8AF},
{0x4BB8, 0x99543D24},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4BB4, 0xFBD5B89F},
- {0x4BB8, 0x99563918},
+ {0x4BB4, 0x05EBC8AF},
+ {0x4BB8, 0x99543D24},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0x05EBC8AF},
+ {0x4BB8, 0x99543D24},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0x05EBC8AF},
+ {0x4BB8, 0x99543D24},
{0xA0000000, 0x00000000},
{0x4BB4, 0xFBD5B89F},
{0x4BB8, 0x99563918},
@@ -729,10 +781,10 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4C58, 0x00001146},
{0x4C5C, 0x00000000},
{0x4C60, 0x00000000},
- {0x4C64, 0xE2E1E1DE},
+ {0x4C64, 0xC8E1E1DE},
{0x4C68, 0xB6B600B6},
{0x4C6C, 0xCACBCBCA},
- {0x4C70, 0x8091010F},
+ {0x4C70, 0x80F9010F},
{0x4C74, 0x00000B11},
{0x46C8, 0x08882550},
{0x46CC, 0x08CC2660},
@@ -747,9 +799,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4744, 0x412504E8},
{0x4748, 0x6850E459},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4740, 0xE4CD38E8},
- {0x4744, 0x4C321B04},
- {0x4748, 0x6750E466},
+ {0x4740, 0xC5AD42F4},
+ {0x4744, 0x412504E8},
+ {0x4748, 0x6850E459},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xC5AD42F4},
+ {0x4744, 0x412504E8},
+ {0x4748, 0x6850E459},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xC5AD42F4},
+ {0x4744, 0x412504E8},
+ {0x4748, 0x6850E459},
{0xA0000000, 0x00000000},
{0x4740, 0xE4CD38E8},
{0x4744, 0x4C321B04},
@@ -766,7 +826,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x476C, 0x1A10FF04},
{0x4770, 0x282A3000},
{0x4774, 0x2A29292A},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4778, 0x04FA2A2A},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4778, 0x04FA2A2A},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4778, 0x06FA2A2A},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
{0x4778, 0x04FA2A2A},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4778, 0x04FA2A2A},
+ {0xA0000000, 0x00000000},
+ {0x4778, 0x04FA2A2A},
+ {0xB0000000, 0x00000000},
{0x477C, 0xEE0F04D1},
{0x49F0, 0x89291436},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
@@ -775,6 +847,10 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x49F4, 0x0701E79E},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
{0x49F4, 0x0701E79E},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
{0xA0000000, 0x00000000},
{0x49F4, 0x0701E79E},
{0xB0000000, 0x00000000},
@@ -783,11 +859,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4A5C, 0x2212FF14},
{0x4A60, 0x60423537},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x649EFF14},
+ {0x4A60, 0xA1B37C4E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
{0x4A5C, 0x4D1E7F14},
{0x4A60, 0x60B37C4E},
- {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4A5C, 0x2212FF14},
- {0x4A60, 0x60423537},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x649EFF14},
+ {0x4A60, 0xA1B37C4E},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x649EFF14},
+ {0x4A60, 0xA1B37C4E},
{0xA0000000, 0x00000000},
{0x4A5C, 0x2212FF14},
{0x4A60, 0x60423537},
@@ -800,11 +882,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4A74, 0x3FFFFD63},
{0x4A78, 0xB58D11FF},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4A70, 0x5107C252},
- {0x4A74, 0x27795843},
+ {0x4A70, 0x510FC252},
+ {0x4A74, 0x27795303},
{0x4A78, 0xB58D11F5},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4A70, 0x5107C252},
+ {0x4A70, 0x510FC252},
+ {0x4A74, 0x27795843},
+ {0x4A78, 0xB58D11F5},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x510FC252},
+ {0x4A74, 0x27795303},
+ {0x4A78, 0xB58D11F5},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x510FC252},
{0x4A74, 0x27795303},
{0x4A78, 0xB58D11F5},
{0xA0000000, 0x00000000},
@@ -819,11 +909,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4AA0, 0x00000020},
{0x4AA4, 0x8325C500},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4AA0, 0x003D4C20},
+ {0x4AA0, 0x003D5420},
{0x4AA4, 0x8F25C500},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x003D4C20},
+ {0x4AA4, 0x8F25C500},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
{0x4AA0, 0x003D5420},
- {0x4AA4, 0x8725C500},
+ {0x4AA4, 0x8F25C500},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x003D5420},
+ {0x4AA4, 0x8F25C500},
{0xA0000000, 0x00000000},
{0x4AA0, 0x00000020},
{0x4AA4, 0x8325C500},
@@ -841,8 +937,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4C78, 0x07ECC9B0},
{0x4C7C, 0x995B4126},
{0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x4C78, 0xFBD5B89F},
- {0x4C7C, 0x99563918},
+ {0x4C78, 0x07ECC9B0},
+ {0x4C7C, 0x995B4126},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0x07ECC9B0},
+ {0x4C7C, 0x995B4126},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0x07ECC9B0},
+ {0x4C7C, 0x995B4126},
{0xA0000000, 0x00000000},
{0x4C78, 0xFBD5B89F},
{0x4C7C, 0x99563918},
@@ -907,17 +1009,46 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x47B4, 0x00000005},
{0x4D2C, 0x0008C0C1},
{0x47B8, 0x00001759},
- {0x47BC, 0x4B702400},
- {0x47C0, 0x831508BA},
+ {0x47BC, 0x4B002402},
+ {0x47C0, 0x831508BC},
{0x4A14, 0x000000E9},
- {0x4D30, 0x00000001},
+ {0x4D30, 0x00000000},
{0x4E94, 0x000000FC},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x47C4, 0x9ABBCACB},
{0x47C8, 0x56767578},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0xA0000000, 0x00000000},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0xB0000000, 0x00000000},
{0x47CC, 0xBBCCBBB3},
{0x47D0, 0x57889989},
{0x47D4, 0x00000F45},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x4D34, 0x7BB167AB},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4D34, 0x7BB1579A},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4D34, 0x7BB167AB},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4D34, 0x7BB1579A},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4D34, 0x7BB1579A},
+ {0xA0000000, 0x00000000},
+ {0x4D34, 0x7BB167AB},
+ {0xB0000000, 0x00000000},
{0x4D38, 0xBBBBBB05},
{0x4D3C, 0x777777BB},
{0x4D40, 0x00015277},
@@ -942,7 +1073,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4D48, 0x8C413016},
{0x4D4C, 0xA140B028},
{0x4D50, 0x00150A31},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x481C, 0x576DF814},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x481C, 0x576DF814},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x481C, 0x576BF814},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x481C, 0x576DF814},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x481C, 0x576DF814},
+ {0xA0000000, 0x00000000},
+ {0x481C, 0x576DF814},
+ {0xB0000000, 0x00000000},
{0x4820, 0xA08877AC},
{0x4824, 0x0000007A},
{0x4D54, 0x00001184},
@@ -967,7 +1110,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4D78, 0x994C1502},
{0x4D7C, 0x00017912},
{0x4EDC, 0x00000001},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x484C, 0x0000CA62},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
{0x484C, 0x00008A62},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x484C, 0x0000CA62},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x484C, 0x00008A62},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x484C, 0x00008A62},
+ {0xA0000000, 0x00000000},
+ {0x484C, 0x0000CA62},
+ {0xB0000000, 0x00000000},
{0x4D80, 0x00000002},
{0x4850, 0x00000008},
{0x4854, 0x009B902A},
@@ -1014,7 +1169,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4DA0, 0x8C413016},
{0x4DA4, 0xA140B028},
{0x4DA8, 0x00150A31},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x48D4, 0x576DF814},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x48D4, 0x576BF814},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x48D4, 0x576BF814},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x48D4, 0x576BF814},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x48D4, 0x576BF814},
+ {0xA0000000, 0x00000000},
+ {0x48D4, 0x576DF814},
+ {0xB0000000, 0x00000000},
{0x48D8, 0xA08877AC},
{0x48DC, 0x0000007A},
{0x4DAC, 0x00001184},
@@ -1039,7 +1206,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4DD0, 0x994C1502},
{0x4DD4, 0x00017912},
{0x4EE4, 0x00000001},
- {0x4904, 0x00008A62},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4904, 0x0000CA62},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4904, 0x0000CA62},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4904, 0x0000CA62},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4904, 0x0000CA62},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4904, 0x0000CA62},
+ {0xA0000000, 0x00000000},
+ {0x4904, 0x0000CA62},
+ {0xB0000000, 0x00000000},
{0x4DD8, 0x00000002},
{0x4908, 0x00000008},
{0x490C, 0x80040000},
@@ -1096,8 +1275,8 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x4988, 0x00000000},
{0x498C, 0x00000000},
{0x4E34, 0x00FC0000},
- {0x4E38, 0x0000F800},
- {0x4E3C, 0x00000001},
+ {0x4E38, 0x00000000},
+ {0x4E3C, 0x00000003},
{0x4990, 0x00000000},
{0x4994, 0x00000000},
{0x4998, 0x00000000},
@@ -1134,7 +1313,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x710, 0xEF810000},
{0xC54, 0x1AE1436A},
{0xC58, 0x41000000},
- {0xC68, 0x10000050},
+ {0xC68, 0x90000050},
{0xC6C, 0x20061020},
{0x704, 0x601E0100},
{0xC74, 0x00000000},
@@ -1225,12 +1404,12 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x328, 0xE000E000},
{0x32C, 0x0041E000},
{0x35C, 0x000004C4},
- {0xC0D4, 0xA7C41460},
+ {0xC0D4, 0xA7441460},
{0xC0D8, 0xC6BA7F67},
{0xC0DC, 0x30C52868},
{0xC0E0, 0x75008128},
{0xC0E4, 0x0000272B},
- {0xC1D4, 0xA7C41460},
+ {0xC1D4, 0xA7441460},
{0xC1D8, 0xC6BA7F67},
{0xC1DC, 0x30C52868},
{0xC1E0, 0x75008128},
@@ -1290,7 +1469,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0xC8C, 0x02F2FC08},
{0xC70, 0x071BFC00},
{0x980, 0x10002251},
- {0x988, 0x3C3C4107},
+ {0x988, 0x3C3C8107},
{0x904, 0x00000005},
{0x994, 0x00000010},
{0x000, 0x0580801F},
@@ -1359,7 +1538,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x2310, 0xBC80536C},
{0x2314, 0x0363A0F3},
{0x2318, 0x000000BB},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x724, 0x00111200},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x724, 0x20111100},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x724, 0x20111100},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x724, 0x01100100},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x724, 0x01100100},
+ {0xA0000000, 0x00000000},
+ {0x724, 0x00111200},
+ {0xB0000000, 0x00000000},
{0x704, 0x601E0D00},
{0xC78, 0xBFFFFFFF},
{0x704, 0x601E0D02},
@@ -1393,7 +1584,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0xC60, 0x017FFFF3},
{0xC70, 0x071BFE00},
{0xC70, 0x071BFE60},
- {0xC6C, 0x20061021},
+ {0xC6C, 0x26061021},
{0x58AC, 0x08000000},
{0x78AC, 0x08000000},
{0x8120, 0x10000000},
@@ -1452,7 +1643,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x12A0, 0x24903056},
{0x12AC, 0x12333121},
{0x12B8, 0x30020000},
- {0x2000, 0x18BBBF84},
+ {0x2000, 0x20BBBF04},
{0x2C14, 0x85000005},
{0x3200, 0x00010142},
{0x32A0, 0x24903056},
@@ -1469,7 +1660,21 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x76C8, 0x0E800400},
{0x984, 0x000000E0},
{0x2008, 0x000FFFFF},
+ {0x1210, 0x8049E304},
+ {0x3210, 0x8049E304},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x58B0, 0x00000800},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x58B0, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x58B0, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x58B0, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x58B0, 0x00000000},
+ {0xA0000000, 0x00000000},
{0x58B0, 0x00000800},
+ {0xB0000000, 0x00000000},
{0x5A00, 0x00000000},
{0x5A04, 0x00000000},
{0x5A08, 0x00000000},
@@ -1479,7 +1684,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x5A18, 0x00000000},
{0x5A1C, 0x00000000},
{0x5A20, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A24, 0x00050000},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A24, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A24, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A24, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A24, 0x00000000},
+ {0xA0000000, 0x00000000},
{0x5A24, 0x00050000},
+ {0xB0000000, 0x00000000},
{0x5A28, 0x00000000},
{0x5A2C, 0x00000000},
{0x5A30, 0x00000000},
@@ -1487,14 +1704,38 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x5A38, 0x00000000},
{0x5A3C, 0x00000000},
{0x5A40, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A44, 0x00000005},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A44, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A44, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A44, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A44, 0x00000000},
+ {0xA0000000, 0x00000000},
{0x5A44, 0x00000005},
+ {0xB0000000, 0x00000000},
{0x5A48, 0x00000000},
{0x5A4C, 0x00000000},
{0x5A50, 0x00000000},
{0x5A54, 0x00000000},
{0x5A58, 0x00000000},
{0x5A5C, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A60, 0x00050000},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A60, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A60, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A60, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5A60, 0x00000000},
+ {0xA0000000, 0x00000000},
{0x5A60, 0x00050000},
+ {0xB0000000, 0x00000000},
{0x5A64, 0x00000000},
{0x5A68, 0x00000000},
{0x5A6C, 0x00000000},
@@ -1514,12 +1755,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x5AA4, 0x00000000},
{0x5AA8, 0x00000000},
{0x5AAC, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5AB0, 0x00050005},
+ {0x5AB4, 0x00050005},
+ {0x5AB8, 0x00050005},
+ {0x5ABC, 0x00050005},
+ {0x5AC0, 0x00000005},
+ {0x78B0, 0x00000800},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5AB0, 0x00000000},
+ {0x5AB4, 0x00000000},
+ {0x5AB8, 0x00000000},
+ {0x5ABC, 0x00000000},
+ {0x5AC0, 0x00000000},
+ {0x78B0, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5AB0, 0x00000000},
+ {0x5AB4, 0x00000000},
+ {0x5AB8, 0x00000000},
+ {0x5ABC, 0x00000000},
+ {0x5AC0, 0x00000000},
+ {0x78B0, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5AB0, 0x00000000},
+ {0x5AB4, 0x00000000},
+ {0x5AB8, 0x00000000},
+ {0x5ABC, 0x00000000},
+ {0x5AC0, 0x00000000},
+ {0x78B0, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5AB0, 0x00000000},
+ {0x5AB4, 0x00000000},
+ {0x5AB8, 0x00000000},
+ {0x5ABC, 0x00000000},
+ {0x5AC0, 0x00000000},
+ {0x78B0, 0x00000000},
+ {0xA0000000, 0x00000000},
{0x5AB0, 0x00050005},
{0x5AB4, 0x00050005},
{0x5AB8, 0x00050005},
{0x5ABC, 0x00050005},
{0x5AC0, 0x00000005},
{0x78B0, 0x00000800},
+ {0xB0000000, 0x00000000},
{0x7A00, 0x00000000},
{0x7A04, 0x00000000},
{0x7A08, 0x00000000},
@@ -1529,7 +1807,19 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x7A18, 0x00000000},
{0x7A1C, 0x00000000},
{0x7A20, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A24, 0x00050000},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A24, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A24, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A24, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A24, 0x00000000},
+ {0xA0000000, 0x00000000},
{0x7A24, 0x00050000},
+ {0xB0000000, 0x00000000},
{0x7A28, 0x00000000},
{0x7A2C, 0x00000000},
{0x7A30, 0x00000000},
@@ -1537,14 +1827,38 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x7A38, 0x00000000},
{0x7A3C, 0x00000000},
{0x7A40, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x7A44, 0x00000005},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A44, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A44, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A44, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A44, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x7A44, 0x00000005},
+ {0xB0000000, 0x00000000},
{0x7A48, 0x00000000},
{0x7A4C, 0x00000000},
{0x7A50, 0x00000000},
{0x7A54, 0x00000000},
{0x7A58, 0x00000000},
{0x7A5C, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x7A60, 0x00050000},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A60, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A60, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A60, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7A60, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x7A60, 0x00050000},
+ {0xB0000000, 0x00000000},
{0x7A64, 0x00000000},
{0x7A68, 0x00000000},
{0x7A6C, 0x00000000},
@@ -1564,143 +1878,223 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
{0x7AA4, 0x00000000},
{0x7AA8, 0x00000000},
{0x7AAC, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7AB0, 0x00050005},
+ {0x7AB4, 0x00050005},
+ {0x7AB8, 0x00050005},
+ {0x7ABC, 0x00050005},
+ {0x7AC0, 0x00000005},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7AB0, 0x00000000},
+ {0x7AB4, 0x00000000},
+ {0x7AB8, 0x00000000},
+ {0x7ABC, 0x00000000},
+ {0x7AC0, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7AB0, 0x00000000},
+ {0x7AB4, 0x00000000},
+ {0x7AB8, 0x00000000},
+ {0x7ABC, 0x00000000},
+ {0x7AC0, 0x00000000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7AB0, 0x00000000},
+ {0x7AB4, 0x00000000},
+ {0x7AB8, 0x00000000},
+ {0x7ABC, 0x00000000},
+ {0x7AC0, 0x00000000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x7AB0, 0x00000000},
+ {0x7AB4, 0x00000000},
+ {0x7AB8, 0x00000000},
+ {0x7ABC, 0x00000000},
+ {0x7AC0, 0x00000000},
+ {0xA0000000, 0x00000000},
{0x7AB0, 0x00050005},
{0x7AB4, 0x00050005},
{0x7AB8, 0x00050005},
{0x7ABC, 0x00050005},
{0x7AC0, 0x00000005},
+ {0xB0000000, 0x00000000},
{0x0F0, 0x00010000},
- {0x0F4, 0x00000018},
- {0x0F8, 0x20220120},
+ {0x0F4, 0x00000028},
+ {0x0F8, 0x20220610},
};
static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = {
{0xF0FF0000, 0x00000000},
{0xF03300FF, 0x00000001},
- {0x000, 0x01E3C39F},
- {0x001, 0x00694727},
- {0x002, 0x00005536},
- {0x100, 0x02E3C39F},
- {0x101, 0x0069472A},
+ {0x000, 0x0EEECAA6},
+ {0x001, 0x006C4B2C},
+ {0x002, 0x00005636},
+ {0x100, 0x0DEFCAA9},
+ {0x101, 0x00694B2C},
{0x102, 0x00005536},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x10000, 0x1A02E1C9},
{0x10001, 0x00644A30},
{0x10002, 0x00006750},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x10000, 0x0EF4D1B9},
- {0x10001, 0x00584125},
- {0x10002, 0x00006750},
+ {0x10000, 0x0BF1CEB6},
+ {0x10001, 0x00434328},
+ {0x10002, 0x00005050},
{0xA0000000, 0x00000000},
- {0x10000, 0x1A02E1C9},
- {0x10001, 0x00644A30},
- {0x10002, 0x00006750},
+ {0x10000, 0x1D08E8D0},
+ {0x10001, 0x00644C32},
+ {0x10002, 0x00006650},
{0xB0000000, 0x00000000},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x10100, 0x1901E1C8},
{0x10101, 0x0061482D},
{0x10102, 0x00006750},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x10100, 0x04E8C5AD},
- {0x10101, 0x00594125},
- {0x10102, 0x00006850},
+ {0x10100, 0x0BF0CEB8},
+ {0x10101, 0x00424227},
+ {0x10102, 0x00005050},
{0xA0000000, 0x00000000},
- {0x10100, 0x1901E1C8},
- {0x10101, 0x0061482D},
- {0x10102, 0x00006750},
+ {0x10100, 0x1F0AECD5},
+ {0x10101, 0x00634B31},
+ {0x10102, 0x00006550},
{0xB0000000, 0x00000000},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x20000, 0x1601E2CA},
{0x20001, 0x005D452A},
{0x20002, 0x00006750},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x20000, 0x0EF4D3BB},
- {0x20001, 0x00563F25},
- {0x20002, 0x00006850},
+ {0x20000, 0x0EF5D3BB},
+ {0x20001, 0x00454529},
+ {0x20002, 0x00005050},
{0xA0000000, 0x00000000},
- {0x20000, 0x1601E2CA},
- {0x20001, 0x005D452A},
- {0x20002, 0x00006750},
+ {0x20000, 0x1904E6CE},
+ {0x20001, 0x0060482D},
+ {0x20002, 0x00006650},
{0xB0000000, 0x00000000},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x20100, 0x1901E1C8},
{0x20101, 0x0061482D},
{0x20102, 0x00006750},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x20100, 0x0BF1CFB7},
- {0x20101, 0x00574025},
- {0x20102, 0x00006750},
+ {0x20100, 0x12F8D7C1},
+ {0x20101, 0x004A4A2E},
+ {0x20102, 0x00005050},
{0xA0000000, 0x00000000},
- {0x20100, 0x1901E1C8},
+ {0x20100, 0x1F0AECD5},
{0x20101, 0x0061482D},
- {0x20102, 0x00006750},
+ {0x20102, 0x00006550},
{0xB0000000, 0x00000000},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x30000, 0x1700E1CA},
{0x30001, 0x005E472B},
{0x30002, 0x00006750},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x30000, 0x05EFCEB7},
- {0x30001, 0x004B351A},
- {0x30002, 0x00006850},
+ {0x30000, 0x0DF6D5BE},
+ {0x30001, 0x00414126},
+ {0x30002, 0x00005050},
{0xA0000000, 0x00000000},
- {0x30000, 0x1700E1CA},
- {0x30001, 0x005E472B},
- {0x30002, 0x00006750},
+ {0x30000, 0x14FEE0CA},
+ {0x30001, 0x005C4328},
+ {0x30002, 0x00006650},
{0xB0000000, 0x00000000},
{0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x30100, 0x14FEE0C9},
{0x30101, 0x00594428},
{0x30102, 0x00006650},
{0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
- {0x30100, 0x0CF2D1B9},
- {0x30101, 0x00563F24},
- {0x30102, 0x00006750},
+ {0x30100, 0x0EF5D5C0},
+ {0x30101, 0x0045452A},
+ {0x30102, 0x00005050},
{0xA0000000, 0x00000000},
- {0x30100, 0x14FEE0C9},
- {0x30101, 0x00594428},
+ {0x30100, 0x1F0AECD8},
+ {0x30101, 0x00654C31},
{0x30102, 0x00006650},
{0xB0000000, 0x00000000},
- {0x40000, 0x13FCDDC8},
- {0x40001, 0x005D4328},
- {0x40002, 0x00006850},
- {0x40100, 0x14FEE3CF},
- {0x40101, 0x00583E24},
- {0x40102, 0x00006850},
- {0x50000, 0x0DF4D6C6},
- {0x50001, 0x00604227},
- {0x50002, 0x00006850},
- {0x50100, 0x1903E7D5},
- {0x50101, 0x0061462B},
- {0x50102, 0x00006850},
- {0x60000, 0x0FF5D7C6},
- {0x60001, 0x005D4429},
- {0x60002, 0x00006850},
- {0x60100, 0x12FADECF},
- {0x60101, 0x005B4126},
- {0x60102, 0x00006850},
- {0x70000, 0x09F1D2C3},
- {0x70001, 0x00554026},
- {0x70002, 0x00006750},
- {0x70100, 0x0CF5DACC},
- {0x70101, 0x00563E25},
- {0x70102, 0x00006750},
+ {0x40000, 0x15FEE0CB},
+ {0x40001, 0x0060462B},
+ {0x40002, 0x00006450},
+ {0x40100, 0x1902E5D2},
+ {0x40101, 0x0063482E},
+ {0x40102, 0x00006450},
+ {0x50000, 0x1C04E6D3},
+ {0x50001, 0x006B5034},
+ {0x50002, 0x00006450},
+ {0x50100, 0x2009EDDB},
+ {0x50101, 0x006B5035},
+ {0x50102, 0x00006450},
+ {0x60000, 0x16FEE1CF},
+ {0x60001, 0x00634A2E},
+ {0x60002, 0x00006550},
+ {0x60100, 0x14FDE2D2},
+ {0x60101, 0x005E4429},
+ {0x60102, 0x00006450},
+ {0x70000, 0x0BF3D6C6},
+ {0x70001, 0x00573F24},
+ {0x70002, 0x00006550},
+ {0x70100, 0x08F0D6C7},
+ {0x70101, 0x0052391E},
+ {0x70102, 0x00006450},
{0x2000000, 0x02E4C4A0},
{0x2000001, 0x006A4828},
{0x2000100, 0x02E4C5A1},
{0x2000101, 0x00664629},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x2010000, 0x05EBC8AF},
{0x2010001, 0x00543D24},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2010000, 0x08EDCAB2},
+ {0x2010001, 0x00434327},
+ {0xA0000000, 0x00000000},
+ {0x2010000, 0x05EBC8AF},
+ {0x2010001, 0x00543D24},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2010100, 0x07ECC9B0},
+ {0x2010101, 0x005B4126},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2010100, 0x08ECCBB2},
+ {0x2010101, 0x003C3C20},
+ {0xA0000000, 0x00000000},
{0x2010100, 0x07ECC9B0},
{0x2010101, 0x005B4126},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x2020000, 0x05EDCCB2},
{0x2020001, 0x004D361C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2020000, 0x0CF4D2BA},
+ {0x2020001, 0x00404025},
+ {0xA0000000, 0x00000000},
+ {0x2020000, 0x05EDCCB2},
+ {0x2020001, 0x004D361C},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x2020100, 0x06ECCBB2},
{0x2020101, 0x00553D22},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2020100, 0x09EECDB8},
+ {0x2020101, 0x00444428},
+ {0xA0000000, 0x00000000},
+ {0x2020100, 0x06ECCBB2},
+ {0x2020101, 0x00553D22},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x2030000, 0x02ECCCB3},
{0x2030001, 0x00483118},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2030000, 0x0DF8D6BF},
+ {0x2030001, 0x003F3F24},
+ {0xA0000000, 0x00000000},
+ {0x2030000, 0x02ECCCB3},
+ {0x2030001, 0x00483118},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2030100, 0x04ECCCB2},
+ {0x2030101, 0x004F381C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x2030100, 0x08EFCDBA},
+ {0x2030101, 0x00414126},
+ {0xA0000000, 0x00000000},
{0x2030100, 0x04ECCCB2},
{0x2030101, 0x004F381C},
+ {0xB0000000, 0x00000000},
{0x3000000, 0x00000000},
{0x3000001, 0x00000000},
{0x3000002, 0x00000000},
@@ -1709,30 +2103,102 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = {
{0x3000101, 0x00000000},
{0x3000102, 0x00000000},
{0x3000103, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x3010000, 0x0E0CFB0A},
{0x3010001, 0x00100F06},
{0x3010002, 0x34333333},
{0x3010003, 0x3434343C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3010000, 0x0E0CFB0A},
+ {0x3010001, 0x00100F06},
+ {0x3010002, 0x34333327},
+ {0x3010003, 0x3434343C},
+ {0xA0000000, 0x00000000},
+ {0x3010000, 0x0E0CFB0A},
+ {0x3010001, 0x00100F06},
+ {0x3010002, 0x34333333},
+ {0x3010003, 0x3434343C},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x3010100, 0x0E0CFB0A},
{0x3010101, 0x00100F06},
{0x3010102, 0x34333333},
{0x3010103, 0x3434343C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3010100, 0x0E0CFB0A},
+ {0x3010101, 0x00100F06},
+ {0x3010102, 0x34333327},
+ {0x3010103, 0x3434343C},
+ {0xA0000000, 0x00000000},
+ {0x3010100, 0x0E0CFB0A},
+ {0x3010101, 0x00100F06},
+ {0x3010102, 0x34333333},
+ {0x3010103, 0x3434343C},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3020000, 0x0E0CFB0A},
+ {0x3020001, 0x00100F06},
+ {0x3020002, 0x34333333},
+ {0x3020003, 0x3434343C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3020000, 0x0E0CFB0A},
+ {0x3020001, 0x00100F06},
+ {0x3020002, 0x34333327},
+ {0x3020003, 0x3434343C},
+ {0xA0000000, 0x00000000},
{0x3020000, 0x0E0CFB0A},
{0x3020001, 0x00100F06},
{0x3020002, 0x34333333},
{0x3020003, 0x3434343C},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
{0x3020100, 0x0E0CFB0A},
{0x3020101, 0x00100F06},
{0x3020102, 0x34333333},
{0x3020103, 0x3434343C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3020100, 0x0E0CFB0A},
+ {0x3020101, 0x00100F06},
+ {0x3020102, 0x34333327},
+ {0x3020103, 0x3434343C},
+ {0xA0000000, 0x00000000},
+ {0x3020100, 0x0E0CFB0A},
+ {0x3020101, 0x00100F06},
+ {0x3020102, 0x34333333},
+ {0x3020103, 0x3434343C},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3030000, 0x0E0CFB0A},
+ {0x3030001, 0x00100F06},
+ {0x3030002, 0x34333333},
+ {0x3030003, 0x3434343C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3030000, 0x0E0CFB0A},
+ {0x3030001, 0x00100F06},
+ {0x3030002, 0x34333327},
+ {0x3030003, 0x3434343C},
+ {0xA0000000, 0x00000000},
{0x3030000, 0x0E0CFB0A},
{0x3030001, 0x00100F06},
{0x3030002, 0x34333333},
{0x3030003, 0x3434343C},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3030100, 0x0E0CFB0A},
+ {0x3030101, 0x00100F06},
+ {0x3030102, 0x34333333},
+ {0x3030103, 0x3434343C},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x3030100, 0x0E0CFB0A},
+ {0x3030101, 0x00100F06},
+ {0x3030102, 0x34333327},
+ {0x3030103, 0x3434343C},
+ {0xA0000000, 0x00000000},
{0x3030100, 0x0E0CFB0A},
{0x3030101, 0x00100F06},
{0x3030102, 0x34333333},
{0x3030103, 0x3434343C},
+ {0xB0000000, 0x00000000},
{0x3040000, 0x0E0CFB0A},
{0x3040001, 0x00100F06},
{0x3040002, 0x343B3333},
@@ -1765,6 +2231,310 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = {
{0x3070101, 0x00100F06},
{0x3070102, 0x3C3B3333},
{0x3070103, 0x34343C3C},
+ {0x4000000, 0x00000000},
+ {0x4000001, 0x76543210},
+ {0x4000002, 0x77777777},
+ {0x4000003, 0x35374425},
+ {0x4000004, 0x00000043},
+ {0x4000005, 0x000038E8},
+ {0x4000100, 0x00000000},
+ {0x4000101, 0x76543210},
+ {0x4000102, 0x77777777},
+ {0x4000103, 0x35374425},
+ {0x4000104, 0x00000043},
+ {0x4000105, 0x000038E8},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4010000, 0x00000000},
+ {0x4010001, 0x76543210},
+ {0x4010002, 0x77777777},
+ {0x4010003, 0x35374425},
+ {0x4010004, 0x00000042},
+ {0x4010005, 0x000038E8},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4010000, 0x0000FC50},
+ {0x4010001, 0x51403210},
+ {0x4010002, 0x76543276},
+ {0x4010003, 0x3A4DAA3C},
+ {0x4010004, 0x00000093},
+ {0x4010005, 0x000040E4},
+ {0xA0000000, 0x00000000},
+ {0x4010000, 0x00000000},
+ {0x4010001, 0x76543210},
+ {0x4010002, 0x77777777},
+ {0x4010003, 0x35374425},
+ {0x4010004, 0x00000042},
+ {0x4010005, 0x000038E8},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4010100, 0x00000000},
+ {0x4010101, 0x76543210},
+ {0x4010102, 0x77777777},
+ {0x4010103, 0x35374425},
+ {0x4010104, 0x00000042},
+ {0x4010105, 0x000038E8},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4010100, 0x0000FC50},
+ {0x4010101, 0x51403210},
+ {0x4010102, 0x76543276},
+ {0x4010103, 0x3A4DAA3C},
+ {0x4010104, 0x00000093},
+ {0x4010105, 0x000040E4},
+ {0xA0000000, 0x00000000},
+ {0x4010100, 0x00000000},
+ {0x4010101, 0x76543210},
+ {0x4010102, 0x77777777},
+ {0x4010103, 0x35374425},
+ {0x4010104, 0x00000042},
+ {0x4010105, 0x000038E8},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4020000, 0x00000000},
+ {0x4020001, 0x76543210},
+ {0x4020002, 0x77777777},
+ {0x4020003, 0x35374425},
+ {0x4020004, 0x00000042},
+ {0x4020005, 0x000038E8},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4020000, 0x0000FC50},
+ {0x4020001, 0x51403210},
+ {0x4020002, 0x76543276},
+ {0x4020003, 0x4B4DAA3C},
+ {0x4020004, 0x000000A3},
+ {0x4020005, 0x000040E4},
+ {0xA0000000, 0x00000000},
+ {0x4020000, 0x00000000},
+ {0x4020001, 0x76543210},
+ {0x4020002, 0x77777777},
+ {0x4020003, 0x35374425},
+ {0x4020004, 0x00000042},
+ {0x4020005, 0x000038E8},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4020100, 0x00000000},
+ {0x4020101, 0x76543210},
+ {0x4020102, 0x77777777},
+ {0x4020103, 0x35374425},
+ {0x4020104, 0x00000042},
+ {0x4020105, 0x000038E8},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4020100, 0x0000FC50},
+ {0x4020101, 0x51403210},
+ {0x4020102, 0x76543276},
+ {0x4020103, 0x3A4DAA3C},
+ {0x4020104, 0x00000093},
+ {0x4020105, 0x000040E4},
+ {0xA0000000, 0x00000000},
+ {0x4020100, 0x00000000},
+ {0x4020101, 0x76543210},
+ {0x4020102, 0x77777777},
+ {0x4020103, 0x35374425},
+ {0x4020104, 0x00000042},
+ {0x4020105, 0x000038E8},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4030000, 0x00000000},
+ {0x4030001, 0x76543210},
+ {0x4030002, 0x77777777},
+ {0x4030003, 0x35374425},
+ {0x4030004, 0x00000042},
+ {0x4030005, 0x000038E8},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4030000, 0x0000FC50},
+ {0x4030001, 0x51403210},
+ {0x4030002, 0x76543276},
+ {0x4030003, 0x3A4DAA3C},
+ {0x4030004, 0x00000093},
+ {0x4030005, 0x000040E4},
+ {0xA0000000, 0x00000000},
+ {0x4030000, 0x00000000},
+ {0x4030001, 0x76543210},
+ {0x4030002, 0x77777777},
+ {0x4030003, 0x35374425},
+ {0x4030004, 0x00000042},
+ {0x4030005, 0x000038E8},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4030100, 0x00000000},
+ {0x4030101, 0x76543210},
+ {0x4030102, 0x77777777},
+ {0x4030103, 0x35374425},
+ {0x4030104, 0x00000042},
+ {0x4030105, 0x000038E8},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4030100, 0x0000FC50},
+ {0x4030101, 0x51403210},
+ {0x4030102, 0x76543276},
+ {0x4030103, 0x3A4DAA3C},
+ {0x4030104, 0x00000093},
+ {0x4030105, 0x000040E4},
+ {0xA0000000, 0x00000000},
+ {0x4030100, 0x00000000},
+ {0x4030101, 0x76543210},
+ {0x4030102, 0x77777777},
+ {0x4030103, 0x35374425},
+ {0x4030104, 0x00000042},
+ {0x4030105, 0x000038E8},
+ {0xB0000000, 0x00000000},
+ {0x1000000, 0x00000008},
+ {0x1000010, 0x00000008},
+ {0x1000011, 0x00000000},
+ {0x1000100, 0x00000004},
+ {0x1000110, 0x00000004},
+ {0x1000111, 0x00000000},
+ {0x1010000, 0x00000004},
+ {0x1010010, 0x00000004},
+ {0x1010011, 0x00000000},
+ {0x1010020, 0x00000004},
+ {0x1010021, 0x00000000},
+ {0x1010029, 0x00000000},
+ {0x1010030, 0x00000000},
+ {0x1010031, 0x00000000},
+ {0x1010035, 0x00000000},
+ {0x1010039, 0x00000000},
+ {0x101003D, 0x00000000},
+ {0x1010100, 0x00000010},
+ {0x1010110, 0x00000010},
+ {0x1010111, 0x00000000},
+ {0x1010120, 0x00000010},
+ {0x1010121, 0x00000000},
+ {0x1010129, 0x00000000},
+ {0x1010030, 0x00000000},
+ {0x1010031, 0x00000000},
+ {0x1010035, 0x00000000},
+ {0x1010039, 0x00000000},
+ {0x101003D, 0x00000000},
+ {0x1020000, 0x000000FA},
+ {0x1020010, 0x000000FA},
+ {0x1020011, 0x00000000},
+ {0x1020020, 0x000000FA},
+ {0x1020021, 0x00000000},
+ {0x1020029, 0x00000000},
+ {0x1020030, 0x00000000},
+ {0x1020031, 0x00000000},
+ {0x1020035, 0x00000000},
+ {0x1020039, 0x00000000},
+ {0x102003D, 0x00000000},
+ {0x1020100, 0x0000000D},
+ {0x1020110, 0x0000000D},
+ {0x1020111, 0x00000000},
+ {0x1020120, 0x0000000D},
+ {0x1020121, 0x00000000},
+ {0x1020129, 0x00000000},
+ {0x1020030, 0x00000000},
+ {0x1020031, 0x00000000},
+ {0x1020035, 0x00000000},
+ {0x1020039, 0x00000000},
+ {0x102003D, 0x00000000},
+ {0x1030000, 0x000000E4},
+ {0x1030010, 0x000000E4},
+ {0x1030011, 0x00000000},
+ {0x1030020, 0x0000E8E8},
+ {0x1030021, 0x00000000},
+ {0x1030029, 0x00000000},
+ {0x1030030, 0x00000000},
+ {0x1030031, 0x00000000},
+ {0x1030035, 0x00000000},
+ {0x1030039, 0x00000000},
+ {0x103003D, 0x00000000},
+ {0x1030100, 0x00000018},
+ {0x1030110, 0x00000018},
+ {0x1030111, 0x00000000},
+ {0x1030120, 0x00000018},
+ {0x1030121, 0x00000000},
+ {0x1030129, 0x00000000},
+ {0x1030030, 0x00000000},
+ {0x1030031, 0x00000000},
+ {0x1030035, 0x00000000},
+ {0x1030039, 0x00000000},
+ {0x103003D, 0x00000000},
+ {0x1040000, 0x000000EE},
+ {0x1040010, 0x000000EE},
+ {0x1040011, 0x00000000},
+ {0x1040020, 0x000000EE},
+ {0x1040021, 0x00000000},
+ {0x1040029, 0x00000000},
+ {0x1040030, 0x000000EE},
+ {0x1040031, 0x00000000},
+ {0x1040035, 0x00000000},
+ {0x1040039, 0x00000000},
+ {0x104003D, 0x00000000},
+ {0x1040100, 0x00000000},
+ {0x1040110, 0x00000005},
+ {0x1040111, 0x00000000},
+ {0x1040120, 0x00000008},
+ {0x1040121, 0x00000000},
+ {0x1040129, 0x00000000},
+ {0x1040030, 0x00000008},
+ {0x1040031, 0x00000000},
+ {0x1040035, 0x00000000},
+ {0x1040039, 0x00000000},
+ {0x104003D, 0x00000000},
+ {0x1050000, 0x00000008},
+ {0x1050010, 0x0000000B},
+ {0x1050011, 0x00000000},
+ {0x1050020, 0x00000015},
+ {0x1050021, 0x00000000},
+ {0x1050029, 0x00000000},
+ {0x1050030, 0x00000010},
+ {0x1050031, 0x00000000},
+ {0x1050035, 0x00000000},
+ {0x1050039, 0x00000000},
+ {0x105003D, 0x00000000},
+ {0x1050100, 0x00000016},
+ {0x1050110, 0x00000016},
+ {0x1050111, 0x0000F8F8},
+ {0x1050120, 0x0000001A},
+ {0x1050121, 0x00000000},
+ {0x1050129, 0x00000000},
+ {0x1050030, 0x0000001A},
+ {0x1050031, 0x00000000},
+ {0x1050035, 0x00000000},
+ {0x1050039, 0x00000000},
+ {0x105003D, 0x00000000},
+ {0x1060000, 0x000000F8},
+ {0x1060010, 0x000000F8},
+ {0x1060011, 0x00000000},
+ {0x1060020, 0x00000000},
+ {0x1060021, 0x00000000},
+ {0x1060029, 0x00000000},
+ {0x1060030, 0x00000000},
+ {0x1060031, 0x00000000},
+ {0x1060035, 0x00000000},
+ {0x1060039, 0x00000000},
+ {0x106003D, 0x00000000},
+ {0x1060100, 0x000000F6},
+ {0x1060110, 0x000000F6},
+ {0x1060111, 0x00000000},
+ {0x1060120, 0x000000F6},
+ {0x1060121, 0x00000000},
+ {0x1060129, 0x00000000},
+ {0x1060030, 0x00000000},
+ {0x1060031, 0x00000000},
+ {0x1060035, 0x00000000},
+ {0x1060039, 0x00000000},
+ {0x106003D, 0x00000000},
+ {0x1070000, 0x000000E8},
+ {0x1070010, 0x000000E8},
+ {0x1070011, 0x00000000},
+ {0x1070020, 0x000000E8},
+ {0x1070021, 0x00000000},
+ {0x1070029, 0x00000000},
+ {0x1070030, 0x000000F0},
+ {0x1070031, 0x00000000},
+ {0x1070035, 0x00000000},
+ {0x1070039, 0x00000000},
+ {0x107003D, 0x00000000},
+ {0x1070100, 0x000000E4},
+ {0x1070110, 0x000000E4},
+ {0x1070111, 0x00000000},
+ {0x1070120, 0x000000E4},
+ {0x1070121, 0x00000000},
+ {0x1070129, 0x00000000},
+ {0x1070030, 0x000000F0},
+ {0x1070031, 0x00000000},
+ {0x1070035, 0x00000000},
+ {0x1070039, 0x00000000},
+ {0x107003D, 0x00000000},
};
static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index b889e7bf34c0..9d4c6b6fa125 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -298,7 +298,9 @@
le32_get_bits(*((const __le32 *)ie), GENMASK(11, 5))
#define RTW89_GET_PHY_STS_IE01_CH_IDX(ie) \
le32_get_bits(*((const __le32 *)ie), GENMASK(23, 16))
-#define RTW89_GET_PHY_STS_IE01_CFO(ie) \
+#define RTW89_GET_PHY_STS_IE01_FD_CFO(ie) \
+ le32_get_bits(*((const __le32 *)(ie) + 1), GENMASK(19, 8))
+#define RTW89_GET_PHY_STS_IE01_PREMB_CFO(ie) \
le32_get_bits(*((const __le32 *)(ie) + 1), GENMASK(31, 20))
enum rtw89_tx_channel {
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index 1ae80b7561da..e2ed4565025d 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -44,4 +44,15 @@ static inline s32 s32_div_u32_round_closest(s32 dividend, u32 divisor)
return s32_div_u32_round_down(dividend + divisor / 2, divisor, NULL);
}
+static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask)
+{
+ int i;
+
+ eth_zero_addr(dst);
+ for (i = 0; i < ETH_ALEN; i++) {
+ if (mask & BIT(i))
+ dst[i] = src[i];
+ }
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
new file mode 100644
index 000000000000..b2b826b2e09a
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+#include "cam.h"
+#include "core.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "ps.h"
+#include "reg.h"
+#include "util.h"
+#include "wow.h"
+
+static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
+{
+ __rtw89_leave_ps_mode(rtwdev);
+}
+
+static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ __rtw89_enter_ps_mode(rtwdev, rtwvif);
+}
+
+static void rtw89_wow_enter_lps(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ rtw89_enter_lps(rtwdev, rtwvif);
+}
+
+static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev)
+{
+ rtw89_leave_lps(rtwdev);
+}
+
+static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow)
+{
+ int ret;
+
+ if (enable_wow) {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+ rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_write32_clr(rtwdev, R_AX_RX_FLTR_OPT, B_AX_SNIFFER_MODE);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
+ rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
+ } else {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+ rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
+ }
+
+ return 0;
+}
+
+static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_mac_fwd_target fwd_target = enable ?
+ RTW89_FWD_DONT_CARE :
+ RTW89_FWD_TO_HOST;
+
+ rtw89_mac_typ_fltr_opt(rtwdev, RTW89_MGNT, fwd_target, RTW89_MAC_0);
+ rtw89_mac_typ_fltr_opt(rtwdev, RTW89_CTRL, fwd_target, RTW89_MAC_0);
+ rtw89_mac_typ_fltr_opt(rtwdev, RTW89_DATA, fwd_target, RTW89_MAC_0);
+}
+
+static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct cfg80211_wowlan_nd_info nd_info;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ u32 wow_reason_reg;
+ u8 reason;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ wow_reason_reg = R_AX_C2HREG_DATA3 + 3;
+ else
+ wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3;
+
+ reason = rtw89_read8(rtwdev, wow_reason_reg);
+
+ switch (reason) {
+ case RTW89_WOW_RSN_RX_DEAUTH:
+ wakeup.disconnect = true;
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx deauth\n");
+ break;
+ case RTW89_WOW_RSN_DISCONNECT:
+ wakeup.disconnect = true;
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: AP is off\n");
+ break;
+ case RTW89_WOW_RSN_RX_MAGIC_PKT:
+ wakeup.magic_pkt = true;
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx magic packet\n");
+ break;
+ case RTW89_WOW_RSN_RX_GTK_REKEY:
+ wakeup.gtk_rekey_failure = true;
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx gtk rekey\n");
+ break;
+ case RTW89_WOW_RSN_RX_PATTERN_MATCH:
+ /* Current firmware and driver don't report the pattern index,
+ * so default pattern_idx to 0.
+ */
+ wakeup.pattern_idx = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx pattern match packet\n");
+ break;
+ case RTW89_WOW_RSN_RX_NLO:
+ /* Current firmware and driver don't report the SSID index.
+ * Use 0 for n_matches, as its documentation suggests.
+ */
+ nd_info.n_matches = 0;
+ wakeup.net_detect = &nd_info;
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "Rx NLO\n");
+ break;
+ default:
+ rtw89_warn(rtwdev, "Unknown wakeup reason %x\n", reason);
+ ieee80211_report_wowlan_wakeup(rtwdev->wow.wow_vif, NULL,
+ GFP_KERNEL);
+ return;
+ }
+
+ ieee80211_report_wowlan_wakeup(rtwdev->wow.wow_vif, &wakeup,
+ GFP_KERNEL);
+}
+
+static void rtw89_wow_vif_iter(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ /* The current WoWLAN implementation supports only one STATION vif,
+ * so stop the iteration once a suitable vif is found.
+ */
+ if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ switch (rtwvif->net_type) {
+ case RTW89_NET_TYPE_INFRA:
+ rtw_wow->wow_vif = vif;
+ break;
+ case RTW89_NET_TYPE_NO_LINK:
+ default:
+ break;
+ }
+}
+
+static u16 __rtw89_cal_crc16(u8 data, u16 crc)
+{
+ u8 shift_in, data_bit;
+ u8 crc_bit4, crc_bit11, crc_bit15;
+ u16 crc_result;
+ int index;
+
+ for (index = 0; index < 8; index++) {
+ crc_bit15 = crc & BIT(15) ? 1 : 0;
+ data_bit = data & BIT(index) ? 1 : 0;
+ shift_in = crc_bit15 ^ data_bit;
+
+ crc_result = crc << 1;
+
+ if (shift_in == 0)
+ crc_result &= ~BIT(0);
+ else
+ crc_result |= BIT(0);
+
+ crc_bit11 = (crc & BIT(11) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit11 == 0)
+ crc_result &= ~BIT(12);
+ else
+ crc_result |= BIT(12);
+
+ crc_bit4 = (crc & BIT(4) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit4 == 0)
+ crc_result &= ~BIT(5);
+ else
+ crc_result |= BIT(5);
+
+ crc = crc_result;
+ }
+ return crc;
+}
+
+static u16 rtw89_calc_crc(u8 *pdata, int length)
+{
+ u16 crc = 0xffff;
+ int i;
+
+ for (i = 0; i < length; i++)
+ crc = __rtw89_cal_crc16(pdata[i], crc);
+
+ /* get 1's complement */
+ return ~crc;
+}
+
+static int rtw89_wow_pattern_get_type(struct rtw89_vif *rtwvif,
+ struct rtw89_wow_cam_info *rtw_pattern,
+ const u8 *pattern, u8 da_mask)
+{
+ u8 da[ETH_ALEN];
+
+ ether_addr_copy_mask(da, pattern, da_mask);
+
+ /* Each pattern is classified by its DA address:
+ * a. DA is the broadcast address: set bc
+ * b. DA is a multicast address: set mc
+ * c. DA is a unicast address equal to the dev's MAC address: set uc
+ * d. DA is unmasked, also called the wildcard type: leave uc, bc and mc clear
+ * e. Anything else is an invalid type.
+ */
+
+ if (is_broadcast_ether_addr(da))
+ rtw_pattern->bc = true;
+ else if (is_multicast_ether_addr(da))
+ rtw_pattern->mc = true;
+ else if (ether_addr_equal(da, rtwvif->mac_addr) &&
+ da_mask == GENMASK(5, 0))
+ rtw_pattern->uc = true;
+ else if (!da_mask) /* da_mask == 0 means wildcard */
+ return 0;
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+static int rtw89_wow_pattern_generate(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ const struct cfg80211_pkt_pattern *pkt_pattern,
+ struct rtw89_wow_cam_info *rtw_pattern)
+{
+ u8 mask_hw[RTW89_MAX_PATTERN_MASK_SIZE * 4] = {0};
+ u8 content[RTW89_MAX_PATTERN_SIZE] = {0};
+ const u8 *mask;
+ const u8 *pattern;
+ u8 mask_len;
+ u16 count;
+ u32 len;
+ int i, ret;
+
+ pattern = pkt_pattern->pattern;
+ len = pkt_pattern->pattern_len;
+ mask = pkt_pattern->mask;
+ mask_len = DIV_ROUND_UP(len, 8);
+ memset(rtw_pattern, 0, sizeof(*rtw_pattern));
+
+ ret = rtw89_wow_pattern_get_type(rtwvif, rtw_pattern, pattern,
+ mask[0] & GENMASK(5, 0));
+ if (ret)
+ return ret;
+
+ /* Translate the mask from the OS into the mask for the HW.
+ * The pattern from the OS describes an 'Ethernet frame', like this:
+ * |    6    |    6    |   2   |     20    |  Variable  |  4  |
+ * |---------+---------+-------+-----------+------------+-----|
+ * |  802.3 MAC Header           | IP Header | TCP Packet | FCS |
+ * |    DA   |    SA   |  Type |
+ *
+ * BUT, the packet caught by our HW is an '802.11 frame', beginning from the LLC:
+ * |      24 or 30     |    6    |   2   |     20    |  Variable  |  4  |
+ * |-------------------+---------+-------+-----------+------------+-----|
+ * | 802.11 MAC Header |       LLC       | IP Header | TCP Packet | FCS |
+ * |       Others      |         |  Type |
+ *
+ * Therefore, we need to translate mask_from_OS into mask_to_hw.
+ * Left-shift the mask by 6 bits, then clear the new bits [0~5]:
+ * after the shift, mask[0~5] would refer to the SA, but the HW packet
+ * begins from the LLC, so bits [0~5] correspond to the first 6 bytes of
+ * the LLC and simply don't match.
+ */
+
+ /* Shift 6 bits */
+ for (i = 0; i < mask_len - 1; i++) {
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6)) |
+ u8_get_bits(mask[i + 1], GENMASK(5, 0)) << 2;
+ }
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6));
+
+ /* Set bit 0-5 to zero */
+ mask_hw[0] &= ~GENMASK(5, 0);
+
+ memcpy(rtw_pattern->mask, mask_hw, sizeof(rtw_pattern->mask));
+
+ /* Extract the wake-up pattern content according to the mask.
+ * The first 12 bytes, i.e. DA[6] and SA[6] of the pattern, are not
+ * counted, to match the HW design.
+ */
+ count = 0;
+ for (i = 12; i < len; i++) {
+ if ((mask[i / 8] >> (i % 8)) & 0x01) {
+ content[count] = pattern[i];
+ count++;
+ }
+ }
+
+ rtw_pattern->crc = rtw89_calc_crc(content, count);
+
+ return 0;
+}
+
+static int rtw89_wow_parse_patterns(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_cam_info *rtw_pattern = rtw_wow->patterns;
+ int i;
+ int ret;
+
+ if (!wowlan->n_patterns || !wowlan->patterns)
+ return 0;
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ rtw_pattern = &rtw_wow->patterns[i];
+ ret = rtw89_wow_pattern_generate(rtwdev, rtwvif,
+ &wowlan->patterns[i],
+ rtw_pattern);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to generate pattern(%d)\n", i);
+ rtw_wow->pattern_cnt = 0;
+ return ret;
+ }
+
+ rtw_pattern->r_w = true;
+ rtw_pattern->idx = i;
+ rtw_pattern->negative_pattern_match = false;
+ rtw_pattern->skip_mac_hdr = true;
+ rtw_pattern->valid = true;
+ }
+ rtw_wow->pattern_cnt = wowlan->n_patterns;
+
+ return 0;
+}
+
+static void rtw89_wow_pattern_clear_cam(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_cam_info *rtw_pattern = rtw_wow->patterns;
+ int i = 0;
+
+ for (i = 0; i < rtw_wow->pattern_cnt; i++) {
+ rtw_pattern = &rtw_wow->patterns[i];
+ rtw_pattern->valid = false;
+ rtw89_fw_wow_cam_update(rtwdev, rtw_pattern);
+ }
+}
+
+static void rtw89_wow_pattern_write(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_cam_info *rtw_pattern = rtw_wow->patterns;
+ int i;
+
+ for (i = 0; i < rtw_wow->pattern_cnt; i++)
+ rtw89_fw_wow_cam_update(rtwdev, rtw_pattern + i);
+}
+
+static void rtw89_wow_pattern_clear(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw89_wow_pattern_clear_cam(rtwdev);
+
+ rtw_wow->pattern_cnt = 0;
+ memset(rtw_wow->patterns, 0, sizeof(rtw_wow->patterns));
+}
+
+static void rtw89_wow_clear_wakeups(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw_wow->wow_vif = NULL;
+ rtw89_core_release_all_bits_map(rtw_wow->flags, RTW89_WOW_FLAG_NUM);
+ rtw_wow->pattern_cnt = 0;
+}
+
+static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_vif *rtwvif;
+
+ if (wowlan->disconnect)
+ set_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags);
+ if (wowlan->magic_pkt)
+ set_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags);
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_wow_vif_iter(rtwdev, rtwvif);
+
+ if (!rtw_wow->wow_vif)
+ return -EPERM;
+
+ rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+ return rtw89_wow_parse_patterns(rtwdev, rtwvif, wowlan);
+}
+
+static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ struct ieee80211_sta *wow_sta;
+ struct rtw89_sta *rtwsta = NULL;
+ bool is_conn = true;
+ int ret;
+
+ wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
+ if (wow_sta)
+ rtwsta = (struct rtw89_sta *)wow_sta->drv_priv;
+ else
+ is_conn = false;
+
+ if (wow) {
+ if (rtw_wow->pattern_cnt)
+ rtwvif->wowlan_pattern = true;
+ if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
+ rtwvif->wowlan_magic = true;
+ } else {
+ rtwvif->wowlan_pattern = false;
+ rtwvif->wowlan_magic = false;
+ }
+
+ ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif, wow);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to fw wow wakeup ctrl\n");
+ return ret;
+ }
+
+ if (wow) {
+ ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to update dctl cam sec entry: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, !is_conn);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c join info\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif, wow);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to fw wow global\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
+{
+ u8 polling;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_read8_mask, polling,
+ wow_enable == !!polling,
+ 50, 50000, false, rtwdev,
+ R_AX_WOW_CTRL, B_AX_WOW_WOWEN);
+ if (ret)
+ rtw89_err(rtwdev, "failed to check wow status %s\n",
+ wow_enable ? "enabled" : "disabled");
+ return ret;
+}
+
+static void rtw89_wow_release_pkt_list(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct list_head *pkt_list = &rtw_wow->pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+
+ list_for_each_entry_safe(info, tmp, pkt_list, list) {
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ rtw89_core_release_bit_map(rtwdev->pkt_offload,
+ info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+}
+
+static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
+{
+ enum rtw89_fw_type fw_type = wow ? RTW89_FW_WOWLAN : RTW89_FW_NORMAL;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ struct ieee80211_sta *wow_sta;
+ struct rtw89_sta *rtwsta = NULL;
+ bool is_conn = true;
+ int ret;
+
+ rtw89_hci_disable_intr(rtwdev);
+
+ wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
+ if (wow_sta)
+ rtwsta = (struct rtw89_sta *)wow_sta->drv_priv;
+ else
+ is_conn = false;
+
+ ret = rtw89_fw_download(rtwdev, fw_type);
+ if (ret) {
+ rtw89_warn(rtwdev, "download fw failed\n");
+ return ret;
+ }
+
+ rtw89_phy_init_rf_reg(rtwdev, true);
+
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
+ RTW89_ROLE_FW_RESTORE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role maintain\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c assoc cmac tbl\n");
+ return ret;
+ }
+
+ if (!is_conn)
+ rtw89_cam_reset_keys(rtwdev);
+
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, !is_conn);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c join info\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ return ret;
+ }
+
+ if (is_conn) {
+ rtw89_phy_ra_assoc(rtwdev, wow_sta);
+ rtw89_phy_set_bss_color(rtwdev, wow_vif);
+ rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, wow_vif);
+ }
+
+ rtw89_mac_hw_mgnt_sec(rtwdev, wow);
+ rtw89_hci_enable_intr(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_wow_enable_trx_pre(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_hci_ctrl_txdma_ch(rtwdev, false);
+ rtw89_hci_ctrl_txdma_fw_ch(rtwdev, true);
+
+ rtw89_mac_ptk_drop_by_band_and_wait(rtwdev, RTW89_MAC_0);
+
+ ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "txdma ch busy\n");
+ return ret;
+ }
+ rtw89_wow_set_rx_filter(rtwdev, true);
+
+ ret = rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ if (ret) {
+ rtw89_err(rtwdev, "cfg ppdu status\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw89_wow_enable_trx_post(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_hci_disable_intr(rtwdev);
+ rtw89_hci_ctrl_trxhci(rtwdev, false);
+
+ ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to poll txdma ch idle pcie\n");
+ return ret;
+ }
+
+ ret = rtw89_wow_config_mac(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to config mac\n");
+ return ret;
+ }
+
+ rtw89_wow_set_rx_filter(rtwdev, false);
+ rtw89_hci_reset(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_hci_clr_idx_all(rtwdev);
+
+ ret = rtw89_hci_rst_bdram(rtwdev);
+ if (ret) {
+ rtw89_warn(rtwdev, "reset bdram busy\n");
+ return ret;
+ }
+
+ rtw89_hci_ctrl_trxhci(rtwdev, true);
+ rtw89_hci_ctrl_txdma_ch(rtwdev, true);
+
+ ret = rtw89_wow_config_mac(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to config mac\n");
+ return ret;
+ }
+ rtw89_hci_enable_intr(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ if (ret)
+ rtw89_err(rtwdev, "cfg ppdu status\n");
+
+ return ret;
+}
+
+static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+ int ret;
+
+ rtw89_wow_pattern_write(rtwdev);
+
+ ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable keep alive\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_cfg_wake(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to config wake\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_check_fw_status(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to check enable fw ready\n");
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+ int ret;
+
+ rtw89_wow_pattern_clear(rtwdev);
+
+ ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable keep alive\n");
+ goto out;
+ }
+
+ rtw89_wow_release_pkt_list(rtwdev);
+
+ ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_cfg_wake(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable config wake\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_check_fw_status(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int rtw89_wow_enable(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ set_bit(RTW89_FLAG_WOWLAN, rtwdev->flags);
+
+ ret = rtw89_wow_enable_trx_pre(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable trx_pre\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_swap_fw(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to swap to wow fw\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_fw_start(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to let wow fw start\n");
+ goto out;
+ }
+
+ rtw89_wow_enter_lps(rtwdev);
+
+ ret = rtw89_wow_enable_trx_post(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable trx_post\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ clear_bit(RTW89_FLAG_WOWLAN, rtwdev->flags);
+ return ret;
+}
+
+static int rtw89_wow_disable(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_wow_disable_trx_pre(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable trx_pre\n");
+ goto out;
+ }
+
+ rtw89_wow_leave_lps(rtwdev);
+
+ ret = rtw89_wow_fw_stop(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to swap to normal fw\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_swap_fw(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable trx_post\n");
+ goto out;
+ }
+
+ ret = rtw89_wow_disable_trx_post(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable trx_pre\n");
+ goto out;
+ }
+
+out:
+ clear_bit(RTW89_FLAG_WOWLAN, rtwdev->flags);
+ return ret;
+}
+
+int rtw89_wow_resume(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ if (!test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
+ rtw89_err(rtwdev, "wow is not enabled\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (!rtw89_mac_get_power_state(rtwdev)) {
+ rtw89_err(rtwdev, "chip is no power when resume\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ rtw89_wow_leave_deep_ps(rtwdev);
+
+ rtw89_wow_show_wakeup_reason(rtwdev);
+
+ ret = rtw89_wow_disable(rtwdev);
+ if (ret)
+ rtw89_err(rtwdev, "failed to disable wow\n");
+
+out:
+ rtw89_wow_clear_wakeups(rtwdev);
+ return ret;
+}
+
+int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan)
+{
+ int ret;
+
+ ret = rtw89_wow_set_wakeups(rtwdev, wowlan);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to set wakeup event\n");
+ return ret;
+ }
+
+ rtw89_wow_leave_lps(rtwdev);
+
+ ret = rtw89_wow_enable(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to enable wow\n");
+ return ret;
+ }
+
+ rtw89_wow_enter_deep_ps(rtwdev);
+
+ return 0;
+}
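For illustration only, not part of the patch: a standalone sketch of the OS-mask to HW-mask re-alignment done in rtw89_wow_pattern_generate() above. get_bits8() stands in for the kernel's u8_get_bits()/GENMASK(); the buffer sizes and sample mask are arbitrary choices for this example.

#include <stdint.h>
#include <stdio.h>

/* Extract bits [h:l] of an 8-bit value, mirroring u8_get_bits(v, GENMASK(h, l)). */
static uint8_t get_bits8(uint8_t v, unsigned int h, unsigned int l)
{
	return (uint8_t)((v >> l) & ((1U << (h - l + 1)) - 1));
}

/* Re-align the OS byte-mask bitmap so that HW mask bit k covers OS pattern
 * byte k + 6, then clear HW bits [0..5]: on the HW side those first bytes are
 * the LLC header rather than the SA, so the OS's SA bits don't apply.
 */
static void os_mask_to_hw(const uint8_t *mask, int mask_len, uint8_t *mask_hw)
{
	int i;

	for (i = 0; i < mask_len - 1; i++)
		mask_hw[i] = get_bits8(mask[i], 7, 6) |
			     (get_bits8(mask[i + 1], 5, 0) << 2);
	mask_hw[i] = get_bits8(mask[i], 7, 6);
	mask_hw[0] &= (uint8_t)~0x3f;
}

int main(void)
{
	/* OS mask: pattern bytes 0-5 (DA), 12-15 and 16 are significant */
	const uint8_t mask[3] = { 0x3f, 0xf0, 0x01 };
	uint8_t mask_hw[3] = { 0 };

	os_mask_to_hw(mask, 3, mask_hw);
	printf("hw mask: %02x %02x %02x\n", mask_hw[0], mask_hw[1], mask_hw[2]);
	/* prints "hw mask: c0 07 00": DA bits drop out, the rest shift down by 6 */
	return 0;
}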
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
new file mode 100644
index 000000000000..a2f7b2e3cdb4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_WOW_H__
+#define __RTW89_WOW_H__
+
+enum rtw89_wake_reason {
+ RTW89_WOW_RSN_RX_PTK_REKEY = 0x1,
+ RTW89_WOW_RSN_RX_GTK_REKEY = 0x2,
+ RTW89_WOW_RSN_RX_DEAUTH = 0x8,
+ RTW89_WOW_RSN_DISCONNECT = 0x10,
+ RTW89_WOW_RSN_RX_MAGIC_PKT = 0x21,
+ RTW89_WOW_RSN_RX_PATTERN_MATCH = 0x23,
+ RTW89_WOW_RSN_RX_NLO = 0x55,
+};
+
+int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan);
+int rtw89_wow_resume(struct rtw89_dev *rtwdev);
+
+#endif
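For context only, not part of the patch: rtw89_wow_suspend()/rtw89_wow_resume() are the entry points a driver would typically call from its mac80211 .suspend/.resume hooks. The wiring below is a hypothetical sketch; the op names, locking and return-value handling are assumptions for illustration, not taken from this series.

#ifdef CONFIG_PM
/* hypothetical mac80211 suspend/resume wiring, for illustration only */
static int rtw89_ops_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wowlan)
{
	struct rtw89_dev *rtwdev = hw->priv;
	int ret;

	mutex_lock(&rtwdev->mutex);
	ret = rtw89_wow_suspend(rtwdev, wowlan);
	mutex_unlock(&rtwdev->mutex);

	/* a nonzero return asks mac80211 to fall back to a plain suspend */
	return ret ? 1 : 0;
}

static int rtw89_ops_resume(struct ieee80211_hw *hw)
{
	struct rtw89_dev *rtwdev = hw->priv;
	int ret;

	mutex_lock(&rtwdev->mutex);
	ret = rtw89_wow_resume(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	return ret ? 1 : 0;
}
#endif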
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 0f3a80f66b61..ead4d4e04328 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -466,7 +466,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
tid, 0);
}
}
- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+
+ if (IEEE80211_SKB_CB(skb)->control.flags &
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
q_num = MGMT_SOFT_Q;
skb->priority = q_num;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index c61f83a7333b..c7460fbba014 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
u8 header_size;
u8 vap_id = 0;
u8 dword_align_bytes;
+ bool tx_eapol;
u16 seq_num;
info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
tx_params = (struct skb_info *)info->driver_data;
+ tx_eapol = IEEE80211_SKB_CB(skb)->control.flags &
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+
header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
if (header_size > skb_headroom(skb)) {
rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
@@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
}
}
- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ if (tx_eapol) {
rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n");
data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 2fbec51c8f94..bc1f038d1655 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1958,6 +1958,7 @@ static int rsi_mac80211_resume(struct ieee80211_hw *hw)
static const struct ieee80211_ops mac80211_ops = {
.tx = rsi_mac80211_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rsi_mac80211_start,
.stop = rsi_mac80211_stop,
.add_interface = rsi_mac80211_add_interface,
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index 84d82ddded56..6b9864e478ac 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -128,6 +128,7 @@ static const struct ieee80211_ops wfx_ops = {
.remove_interface = wfx_remove_interface,
.config = wfx_config,
.tx = wfx_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.join_ibss = wfx_join_ibss,
.leave_ibss = wfx_leave_ibss,
.conf_tx = wfx_conf_tx,
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 326b1cc1d2bc..381013e0db63 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -209,6 +209,7 @@ static const struct ieee80211_ops cw1200_ops = {
.remove_interface = cw1200_remove_interface,
.change_interface = cw1200_change_interface,
.tx = cw1200_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.hw_scan = cw1200_hw_scan,
.set_tim = cw1200_set_tim,
.sta_notify = cw1200_sta_notify,
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
index 7c0b17a76fe2..3fcd9e395f72 100644
--- a/drivers/net/wireless/ti/Kconfig
+++ b/drivers/net/wireless/ti/Kconfig
@@ -18,12 +18,4 @@ source "drivers/net/wireless/ti/wl18xx/Kconfig"
# keep last for automatic dependencies
source "drivers/net/wireless/ti/wlcore/Kconfig"
-config WILINK_PLATFORM_DATA
- bool "TI WiLink platform data"
- depends on WLCORE_SDIO || WL1251_SDIO
- default y
- help
- Small platform data bit needed to pass data to the sdio modules.
-
-
endif # WLAN_VENDOR_TI
diff --git a/drivers/net/wireless/ti/wilink_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c
deleted file mode 100644
index 1de6a62d526f..000000000000
--- a/drivers/net/wireless/ti/wilink_platform_data.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * This file is part of wl12xx
- *
- * Copyright (C) 2010-2011 Texas Instruments, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/wl12xx.h>
-
-static struct wl1251_platform_data *wl1251_platform_data;
-
-int __init wl1251_set_platform_data(const struct wl1251_platform_data *data)
-{
- if (wl1251_platform_data)
- return -EBUSY;
- if (!data)
- return -EINVAL;
-
- wl1251_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
- if (!wl1251_platform_data)
- return -ENOMEM;
-
- return 0;
-}
-
-struct wl1251_platform_data *wl1251_get_platform_data(void)
-{
- if (!wl1251_platform_data)
- return ERR_PTR(-ENODEV);
-
- return wl1251_platform_data;
-}
-EXPORT_SYMBOL(wl1251_get_platform_data);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 289371689a8d..eded284af600 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1359,6 +1359,7 @@ static const struct ieee80211_ops wl1251_ops = {
.prepare_multicast = wl1251_op_prepare_multicast,
.configure_filter = wl1251_op_configure_filter,
.tx = wl1251_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.set_key = wl1251_op_set_key,
.hw_scan = wl1251_op_hw_scan,
.bss_info_changed = wl1251_op_bss_info_changed,
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index c9a4e9a43400..301bd0043a43 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -12,7 +12,6 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/platform_device.h>
-#include <linux/wl12xx.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
@@ -197,7 +196,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct wl1251 *wl;
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
- const struct wl1251_platform_data *wl1251_board_data;
struct device_node *np = func->dev.of_node;
hw = wl1251_alloc_hw();
@@ -225,11 +223,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl->if_priv = wl_sdio;
wl->if_ops = &wl1251_sdio_ops;
- wl1251_board_data = wl1251_get_platform_data();
- if (!IS_ERR(wl1251_board_data)) {
- wl->irq = wl1251_board_data->irq;
- wl->use_eeprom = wl1251_board_data->use_eeprom;
- } else if (np) {
+ if (np) {
wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
wl->irq = of_irq_get(np, 0);
if (wl->irq == -EPROBE_DEFER) {
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 9df38726e8b0..29292f06bd3d 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -5,6 +5,7 @@
* Copyright (C) 2008 Nokia Corporation
*/
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
@@ -12,16 +13,19 @@
#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
-#include <linux/wl12xx.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include "wl1251.h"
#include "reg.h"
#include "spi.h"
+struct wl1251_spi {
+ struct spi_device *spi;
+ struct gpio_desc *power_gpio;
+};
+
static irqreturn_t wl1251_irq(int irq, void *cookie)
{
struct wl1251 *wl;
@@ -35,13 +39,9 @@ static irqreturn_t wl1251_irq(int irq, void *cookie)
return IRQ_HANDLED;
}
-static struct spi_device *wl_to_spi(struct wl1251 *wl)
-{
- return wl->if_priv;
-}
-
static void wl1251_spi_reset(struct wl1251 *wl)
{
+ struct wl1251_spi *wl_spi = wl->if_priv;
u8 *cmd;
struct spi_transfer t;
struct spi_message m;
@@ -61,7 +61,7 @@ static void wl1251_spi_reset(struct wl1251 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(wl_spi->spi, &m);
wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
@@ -70,6 +70,7 @@ static void wl1251_spi_reset(struct wl1251 *wl)
static void wl1251_spi_wake(struct wl1251 *wl)
{
+ struct wl1251_spi *wl_spi = wl->if_priv;
struct spi_transfer t;
struct spi_message m;
u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
@@ -113,7 +114,7 @@ static void wl1251_spi_wake(struct wl1251 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(wl_spi->spi, &m);
wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
@@ -129,6 +130,7 @@ static void wl1251_spi_reset_wake(struct wl1251 *wl)
static void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf,
size_t len)
{
+ struct wl1251_spi *wl_spi = wl->if_priv;
struct spi_transfer t[3];
struct spi_message m;
u8 *busy_buf;
@@ -158,7 +160,7 @@ static void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf,
t[2].len = len;
spi_message_add_tail(&t[2], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(wl_spi->spi, &m);
/* FIXME: check busy words */
@@ -169,6 +171,7 @@ static void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf,
static void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf,
size_t len)
{
+ struct wl1251_spi *wl_spi = wl->if_priv;
struct spi_transfer t[2];
struct spi_message m;
u32 *cmd;
@@ -191,7 +194,7 @@ static void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf,
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(wl_spi->spi, &m);
wl1251_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
wl1251_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
@@ -209,8 +212,10 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
{
- if (gpio_is_valid(wl->power_gpio))
- gpio_set_value(wl->power_gpio, enable);
+ struct wl1251_spi *wl_spi = wl->if_priv;
+
+ if (wl_spi->power_gpio)
+ gpiod_set_value_cansleep(wl_spi->power_gpio, enable);
return 0;
}
@@ -226,16 +231,20 @@ static const struct wl1251_if_operations wl1251_spi_ops = {
static int wl1251_spi_probe(struct spi_device *spi)
{
- struct wl1251_platform_data *pdata = dev_get_platdata(&spi->dev);
struct device_node *np = spi->dev.of_node;
struct ieee80211_hw *hw;
+ struct wl1251_spi *wl_spi;
struct wl1251 *wl;
int ret;
- if (!np && !pdata) {
- wl1251_error("no platform data");
+ if (!np)
return -ENODEV;
- }
+
+ wl_spi = devm_kzalloc(&spi->dev, sizeof(*wl_spi), GFP_KERNEL);
+ if (!wl_spi)
+ return -ENOMEM;
+
+ wl_spi->spi = spi;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -245,7 +254,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
SET_IEEE80211_DEV(hw, &spi->dev);
spi_set_drvdata(spi, wl);
- wl->if_priv = spi;
+ wl->if_priv = wl_spi;
wl->if_ops = &wl1251_spi_ops;
/* This is the only SPI value that we need to set here, the rest
@@ -259,32 +268,19 @@ static int wl1251_spi_probe(struct spi_device *spi)
goto out_free;
}
- if (np) {
- wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
- wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
- } else if (pdata) {
- wl->power_gpio = pdata->power_gpio;
- wl->use_eeprom = pdata->use_eeprom;
- }
+ wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
- if (wl->power_gpio == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto out_free;
- }
-
- if (gpio_is_valid(wl->power_gpio)) {
- ret = devm_gpio_request_one(&spi->dev, wl->power_gpio,
- GPIOF_OUT_INIT_LOW, "wl1251 power");
- if (ret) {
+ wl_spi->power_gpio = devm_gpiod_get_optional(&spi->dev, "ti,power",
+ GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(wl_spi->power_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
wl1251_error("Failed to request gpio: %d\n", ret);
- goto out_free;
- }
- } else {
- wl1251_error("set power gpio missing in platform data");
- ret = -ENODEV;
goto out_free;
}
+ gpiod_set_consumer_name(wl_spi->power_gpio, "wl1251 power");
+
wl->irq = spi->irq;
if (wl->irq < 0) {
wl1251_error("irq missing in platform data");
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 23ae07dd4c2e..83adbc3c25dc 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -262,7 +262,6 @@ struct wl1251 {
void *if_priv;
const struct wl1251_if_operations *if_ops;
- int power_gpio;
int irq;
bool use_eeprom;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 28c0f06e311f..bf21611872a3 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5942,6 +5942,7 @@ static const struct ieee80211_ops wl1271_ops = {
.prepare_multicast = wl1271_op_prepare_multicast,
.configure_filter = wl1271_op_configure_filter,
.tx = wl1271_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.set_key = wlcore_op_set_key,
.hw_scan = wl1271_op_hw_scan,
.cancel_hw_scan = wl1271_op_cancel_hw_scan,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 7eae1ec2eb2b..2d2edddc77bd 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -14,7 +14,6 @@
#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
-#include <linux/wl12xx.h>
#include <linux/platform_device.h>
#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 82bc0d44212e..a85fe7e4c6d4 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -886,7 +886,7 @@ static void zd1201_set_multicast(struct net_device *dev)
}
static int zd1201_config_commit(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *data, char *essid)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
{
struct zd1201 *zd = netdev_priv(dev);
@@ -894,15 +894,16 @@ static int zd1201_config_commit(struct net_device *dev,
}
static int zd1201_get_name(struct net_device *dev,
- struct iw_request_info *info, char *name, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
- strcpy(name, "IEEE 802.11b");
+ strcpy(wrqu->name, "IEEE 802.11b");
return 0;
}
static int zd1201_set_freq(struct net_device *dev,
- struct iw_request_info *info, struct iw_freq *freq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_freq *freq = &wrqu->freq;
struct zd1201 *zd = netdev_priv(dev);
short channel = 0;
int err;
@@ -922,8 +923,9 @@ static int zd1201_set_freq(struct net_device *dev,
}
static int zd1201_get_freq(struct net_device *dev,
- struct iw_request_info *info, struct iw_freq *freq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_freq *freq = &wrqu->freq;
struct zd1201 *zd = netdev_priv(dev);
short channel;
int err;
@@ -938,8 +940,9 @@ static int zd1201_get_freq(struct net_device *dev,
}
static int zd1201_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *mode, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ __u32 *mode = &wrqu->mode;
struct zd1201 *zd = netdev_priv(dev);
short porttype, monitor = 0;
unsigned char buffer[IW_ESSID_MAX_SIZE+2];
@@ -1001,8 +1004,9 @@ static int zd1201_set_mode(struct net_device *dev,
}
static int zd1201_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *mode, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ __u32 *mode = &wrqu->mode;
struct zd1201 *zd = netdev_priv(dev);
short porttype;
int err;
@@ -1038,8 +1042,9 @@ static int zd1201_get_mode(struct net_device *dev,
}
static int zd1201_get_range(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *wrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *wrq = &wrqu->data;
struct iw_range *range = (struct iw_range *)extra;
wrq->length = sizeof(struct iw_range);
@@ -1077,8 +1082,9 @@ static int zd1201_get_range(struct net_device *dev,
* the stats after asking the bssid.
*/
static int zd1201_get_wap(struct net_device *dev,
- struct iw_request_info *info, struct sockaddr *ap_addr, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct sockaddr *ap_addr = &wrqu->ap_addr;
struct zd1201 *zd = netdev_priv(dev);
unsigned char buffer[6];
@@ -1098,15 +1104,16 @@ static int zd1201_get_wap(struct net_device *dev,
}
static int zd1201_set_scan(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *srq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
/* We do everything in get_scan */
return 0;
}
static int zd1201_get_scan(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *srq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_point *srq = &wrqu->data;
struct zd1201 *zd = netdev_priv(dev);
int err, i, j, enabled_save;
struct iw_event iwe;
@@ -1197,8 +1204,9 @@ static int zd1201_get_scan(struct net_device *dev,
}
static int zd1201_set_essid(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *data, char *essid)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
{
+ struct iw_point *data = &wrqu->data;
struct zd1201 *zd = netdev_priv(dev);
if (data->length > IW_ESSID_MAX_SIZE)
@@ -1212,8 +1220,9 @@ static int zd1201_set_essid(struct net_device *dev,
}
static int zd1201_get_essid(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *data, char *essid)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
{
+ struct iw_point *data = &wrqu->data;
struct zd1201 *zd = netdev_priv(dev);
memcpy(essid, zd->essid, zd->essidlen);
@@ -1224,8 +1233,9 @@ static int zd1201_get_essid(struct net_device *dev,
}
static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *data, char *nick)
+ union iwreq_data *wrqu, char *nick)
{
+ struct iw_point *data = &wrqu->data;
strcpy(nick, "zd1201");
data->flags = 1;
data->length = strlen(nick);
@@ -1233,8 +1243,9 @@ static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info,
}
static int zd1201_set_rate(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->bitrate;
struct zd1201 *zd = netdev_priv(dev);
short rate;
int err;
@@ -1266,8 +1277,9 @@ static int zd1201_set_rate(struct net_device *dev,
}
static int zd1201_get_rate(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->bitrate;
struct zd1201 *zd = netdev_priv(dev);
short rate;
int err;
@@ -1299,8 +1311,9 @@ static int zd1201_get_rate(struct net_device *dev,
}
static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *rts, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rts = &wrqu->rts;
struct zd1201 *zd = netdev_priv(dev);
int err;
short val = rts->value;
@@ -1319,8 +1332,9 @@ static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
}
static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *rts, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rts = &wrqu->rts;
struct zd1201 *zd = netdev_priv(dev);
short rtst;
int err;
@@ -1336,8 +1350,9 @@ static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
}
static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *frag, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *frag = &wrqu->frag;
struct zd1201 *zd = netdev_priv(dev);
int err;
short val = frag->value;
@@ -1357,8 +1372,9 @@ static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
}
static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *frag, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *frag = &wrqu->frag;
struct zd1201 *zd = netdev_priv(dev);
short fragt;
int err;
@@ -1374,20 +1390,21 @@ static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
}
static int zd1201_set_retry(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
return 0;
}
static int zd1201_get_retry(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
return 0;
}
static int zd1201_set_encode(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *erq, char *key)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *key)
{
+ struct iw_point *erq = &wrqu->encoding;
struct zd1201 *zd = netdev_priv(dev);
short i;
int err, rid;
@@ -1443,8 +1460,9 @@ static int zd1201_set_encode(struct net_device *dev,
}
static int zd1201_get_encode(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *erq, char *key)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *key)
{
+ struct iw_point *erq = &wrqu->encoding;
struct zd1201 *zd = netdev_priv(dev);
short i;
int err;
@@ -1476,8 +1494,9 @@ static int zd1201_get_encode(struct net_device *dev,
}
static int zd1201_set_power(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *vwrq = &wrqu->power;
struct zd1201 *zd = netdev_priv(dev);
short enabled, duration, level;
int err;
@@ -1515,8 +1534,9 @@ out:
}
static int zd1201_get_power(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *vwrq = &wrqu->power;
struct zd1201 *zd = netdev_priv(dev);
short enabled, level, duration;
int err;
@@ -1553,57 +1573,37 @@ static int zd1201_get_power(struct net_device *dev,
static const iw_handler zd1201_iw_handler[] =
{
- (iw_handler) zd1201_config_commit, /* SIOCSIWCOMMIT */
- (iw_handler) zd1201_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) zd1201_set_freq, /* SIOCSIWFREQ */
- (iw_handler) zd1201_get_freq, /* SIOCGIWFREQ */
- (iw_handler) zd1201_set_mode, /* SIOCSIWMODE */
- (iw_handler) zd1201_get_mode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) zd1201_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- (iw_handler) NULL, /* SIOCSIWSPY */
- (iw_handler) NULL, /* SIOCGIWSPY */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL/*zd1201_set_wap*/, /* SIOCSIWAP */
- (iw_handler) zd1201_get_wap, /* SIOCGIWAP */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCGIWAPLIST */
- (iw_handler) zd1201_set_scan, /* SIOCSIWSCAN */
- (iw_handler) zd1201_get_scan, /* SIOCGIWSCAN */
- (iw_handler) zd1201_set_essid, /* SIOCSIWESSID */
- (iw_handler) zd1201_get_essid, /* SIOCGIWESSID */
- (iw_handler) NULL, /* SIOCSIWNICKN */
- (iw_handler) zd1201_get_nick, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) zd1201_set_rate, /* SIOCSIWRATE */
- (iw_handler) zd1201_get_rate, /* SIOCGIWRATE */
- (iw_handler) zd1201_set_rts, /* SIOCSIWRTS */
- (iw_handler) zd1201_get_rts, /* SIOCGIWRTS */
- (iw_handler) zd1201_set_frag, /* SIOCSIWFRAG */
- (iw_handler) zd1201_get_frag, /* SIOCGIWFRAG */
- (iw_handler) NULL, /* SIOCSIWTXPOW */
- (iw_handler) NULL, /* SIOCGIWTXPOW */
- (iw_handler) zd1201_set_retry, /* SIOCSIWRETRY */
- (iw_handler) zd1201_get_retry, /* SIOCGIWRETRY */
- (iw_handler) zd1201_set_encode, /* SIOCSIWENCODE */
- (iw_handler) zd1201_get_encode, /* SIOCGIWENCODE */
- (iw_handler) zd1201_set_power, /* SIOCSIWPOWER */
- (iw_handler) zd1201_get_power, /* SIOCGIWPOWER */
+ IW_HANDLER(SIOCSIWCOMMIT, zd1201_config_commit),
+ IW_HANDLER(SIOCGIWNAME, zd1201_get_name),
+ IW_HANDLER(SIOCSIWFREQ, zd1201_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, zd1201_get_freq),
+ IW_HANDLER(SIOCSIWMODE, zd1201_set_mode),
+ IW_HANDLER(SIOCGIWMODE, zd1201_get_mode),
+ IW_HANDLER(SIOCGIWRANGE, zd1201_get_range),
+ IW_HANDLER(SIOCGIWAP, zd1201_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, zd1201_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, zd1201_get_scan),
+ IW_HANDLER(SIOCSIWESSID, zd1201_set_essid),
+ IW_HANDLER(SIOCGIWESSID, zd1201_get_essid),
+ IW_HANDLER(SIOCGIWNICKN, zd1201_get_nick),
+ IW_HANDLER(SIOCSIWRATE, zd1201_set_rate),
+ IW_HANDLER(SIOCGIWRATE, zd1201_get_rate),
+ IW_HANDLER(SIOCSIWRTS, zd1201_set_rts),
+ IW_HANDLER(SIOCGIWRTS, zd1201_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, zd1201_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, zd1201_get_frag),
+ IW_HANDLER(SIOCSIWRETRY, zd1201_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, zd1201_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, zd1201_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, zd1201_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, zd1201_set_power),
+ IW_HANDLER(SIOCGIWPOWER, zd1201_get_power),
};
static int zd1201_set_hostauth(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->param;
struct zd1201 *zd = netdev_priv(dev);
if (!zd->ap)
@@ -1613,8 +1613,9 @@ static int zd1201_set_hostauth(struct net_device *dev,
}
static int zd1201_get_hostauth(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->param;
struct zd1201 *zd = netdev_priv(dev);
short hostauth;
int err;
@@ -1632,8 +1633,9 @@ static int zd1201_get_hostauth(struct net_device *dev,
}
static int zd1201_auth_sta(struct net_device *dev,
- struct iw_request_info *info, struct sockaddr *sta, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct sockaddr *sta = &wrqu->ap_addr;
struct zd1201 *zd = netdev_priv(dev);
unsigned char buffer[10];
@@ -1648,8 +1650,9 @@ static int zd1201_auth_sta(struct net_device *dev,
}
static int zd1201_set_maxassoc(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->param;
struct zd1201 *zd = netdev_priv(dev);
if (!zd->ap)
@@ -1659,8 +1662,9 @@ static int zd1201_set_maxassoc(struct net_device *dev,
}
static int zd1201_get_maxassoc(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *rrq, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
+ struct iw_param *rrq = &wrqu->param;
struct zd1201 *zd = netdev_priv(dev);
short maxassoc;
int err;
@@ -1678,12 +1682,12 @@ static int zd1201_get_maxassoc(struct net_device *dev,
}
static const iw_handler zd1201_private_handler[] = {
- (iw_handler) zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */
- (iw_handler) zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */
- (iw_handler) zd1201_auth_sta, /* ZD1201SIWAUTHSTA */
- (iw_handler) NULL, /* nothing to get */
- (iw_handler) zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */
- (iw_handler) zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */
+ zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */
+ zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */
+ zd1201_auth_sta, /* ZD1201SIWAUTHSTA */
+ NULL, /* nothing to get */
+ zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */
+ zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */
};
static const struct iw_priv_args zd1201_private_args[] = {
@@ -1703,8 +1707,8 @@ static const struct iw_handler_def zd1201_iw_handlers = {
.num_standard = ARRAY_SIZE(zd1201_iw_handler),
.num_private = ARRAY_SIZE(zd1201_private_handler),
.num_private_args = ARRAY_SIZE(zd1201_private_args),
- .standard = (iw_handler *)zd1201_iw_handler,
- .private = (iw_handler *)zd1201_private_handler,
+ .standard = zd1201_iw_handler,
+ .private = zd1201_private_handler,
.private_args = (struct iw_priv_args *) zd1201_private_args,
.get_wireless_stats = zd1201_get_wireless_stats,
};
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 80b905d49954..5d534e15a844 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1344,6 +1344,7 @@ static u64 zd_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static const struct ieee80211_ops zd_ops = {
.tx = zd_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = zd_op_start,
.stop = zd_op_stop,
.add_interface = zd_op_add_interface,
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index ac4d73b5626f..410b0245114e 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -108,6 +108,7 @@ config IOSM
config MTK_T7XX
tristate "MediaTek PCIe 5G WWAN modem T7xx device"
depends on PCI
+ select RELAY if WWAN_DEBUGFS
help
Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device.
Adapts WWAN framework and provides network interface like wwan0
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
index 128c999e08bb..bcfbc6b3d617 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -39,7 +39,7 @@ static struct ipc_chnl_cfg modem_cfg[] = {
/* RPC - 0 */
{ IPC_MEM_CTRL_CHL_ID_1, IPC_MEM_PIPE_2, IPC_MEM_PIPE_3,
IPC_MEM_MAX_TDS_RPC, IPC_MEM_MAX_TDS_RPC,
- IPC_MEM_MAX_DL_RPC_BUF_SIZE, WWAN_PORT_UNKNOWN },
+ IPC_MEM_MAX_DL_RPC_BUF_SIZE, WWAN_PORT_XMMRPC },
/* IAT0 */
{ IPC_MEM_CTRL_CHL_ID_2, IPC_MEM_PIPE_4, IPC_MEM_PIPE_5,
IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index ef70bb7c88ad..3f72ae943b29 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -456,19 +456,19 @@ static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&link->rx_syncp);
+ start = u64_stats_fetch_begin(&link->rx_syncp);
stats->rx_packets = u64_stats_read(&link->rx_packets);
stats->rx_bytes = u64_stats_read(&link->rx_bytes);
stats->rx_errors = u64_stats_read(&link->rx_errors);
- } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&link->rx_syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&link->tx_syncp);
+ start = u64_stats_fetch_begin(&link->tx_syncp);
stats->tx_packets = u64_stats_read(&link->tx_packets);
stats->tx_bytes = u64_stats_read(&link->tx_bytes);
stats->tx_errors = u64_stats_read(&link->tx_errors);
stats->tx_dropped = u64_stats_read(&link->tx_dropped);
- } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&link->tx_syncp, start));
}
static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile
index dc6a7d682c15..268ff9e87e5b 100644
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -18,3 +18,6 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_hif_dpmaif_rx.o \
t7xx_dpmaif.o \
t7xx_netdev.o
+
+mtk_t7xx-$(CONFIG_WWAN_DEBUGFS) += \
+ t7xx_port_trace.o \
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 6ff30cb8eb16..aec3a18d44bd 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1018,6 +1018,8 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
+
+ md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
index 1225ca0ed51e..0ce4505e813d 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
@@ -20,6 +20,7 @@
#include <linux/bitmap.h>
#include <linux/mm_types.h>
+#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -109,20 +110,14 @@ struct dpmaif_rx_queue {
struct dpmaif_bat_request *bat_req;
struct dpmaif_bat_request *bat_frag;
- wait_queue_head_t rx_wq;
- struct task_struct *rx_thread;
- struct sk_buff_head skb_list;
- unsigned int skb_list_max_len;
-
- struct workqueue_struct *worker;
- struct work_struct dpmaif_rxq_work;
-
atomic_t rx_processing;
struct dpmaif_ctrl *dpmaif_ctrl;
unsigned int expect_pit_seq;
unsigned int pit_remain_release_cnt;
struct dpmaif_cur_rx_skb_info rx_data_info;
+ struct napi_struct napi;
+ bool sleep_lock_pending;
};
struct dpmaif_tx_queue {
@@ -168,7 +163,8 @@ enum dpmaif_txq_state {
struct dpmaif_callbacks {
void (*state_notify)(struct t7xx_pci_dev *t7xx_dev,
enum dpmaif_txq_state state, int txq_number);
- void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb);
+ void (*recv_skb)(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi);
};
struct dpmaif_ctrl {
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 91a0eb19e0d8..aa2174a10437 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -45,6 +45,7 @@
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#define DPMAIF_BAT_COUNT 8192
@@ -76,43 +77,6 @@ static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
return value;
}
-static int t7xx_dpmaif_net_rx_push_thread(void *arg)
-{
- struct dpmaif_rx_queue *q = arg;
- struct dpmaif_ctrl *hif_ctrl;
- struct dpmaif_callbacks *cb;
-
- hif_ctrl = q->dpmaif_ctrl;
- cb = hif_ctrl->callbacks;
-
- while (!kthread_should_stop()) {
- struct sk_buff *skb;
- unsigned long flags;
-
- if (skb_queue_empty(&q->skb_list)) {
- if (wait_event_interruptible(q->rx_wq,
- !skb_queue_empty(&q->skb_list) ||
- kthread_should_stop()))
- continue;
-
- if (kthread_should_stop())
- break;
- }
-
- spin_lock_irqsave(&q->skb_list.lock, flags);
- skb = __skb_dequeue(&q->skb_list);
- spin_unlock_irqrestore(&q->skb_list.lock, flags);
-
- if (!skb)
- continue;
-
- cb->recv_skb(hif_ctrl->t7xx_dev, skb);
- cond_resched();
- }
-
- return 0;
-}
-
static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
const unsigned int q_num, const unsigned int bat_cnt)
{
@@ -726,21 +690,10 @@ static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
return ret;
}
-static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->skb_list.lock, flags);
- if (rxq->skb_list.qlen < rxq->skb_list_max_len)
- __skb_queue_tail(&rxq->skb_list, skb);
- else
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
-}
-
static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
struct dpmaif_cur_rx_skb_info *skb_info)
{
+ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
struct sk_buff *skb = skb_info->cur_skb;
struct t7xx_skb_cb *skb_cb;
u8 netif_id;
@@ -758,11 +711,11 @@ static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
skb_cb = T7XX_SKB_CB(skb);
skb_cb->netif_idx = netif_id;
skb_cb->rx_pkt_type = skb_info->pkt_type;
- t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
+ dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
}
static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
- const unsigned long timeout)
+ const unsigned int budget, int *once_more)
{
unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
struct device *dev = rxq->dpmaif_ctrl->dev;
@@ -777,13 +730,14 @@ static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int
struct dpmaif_pit *pkt_info;
u32 val;
- if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
+ if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
break;
pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
- return -EAGAIN;
+ *once_more = 1;
+ return recv_skb_cnt;
}
val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
@@ -817,12 +771,7 @@ static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int
}
memset(skb_info, 0, sizeof(*skb_info));
-
recv_skb_cnt++;
- if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
- wake_up_all(&rxq->rx_wq);
- recv_skb_cnt = 0;
- }
}
}
@@ -837,16 +786,13 @@ static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int
}
}
- if (recv_skb_cnt)
- wake_up_all(&rxq->rx_wq);
-
if (!ret)
ret = t7xx_dpmaifq_rx_notify_hw(rxq);
if (ret)
return ret;
- return rx_cnt;
+ return recv_skb_cnt;
}
static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
@@ -863,53 +809,30 @@ static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
return pit_cnt;
}
-static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
- const unsigned int q_num, const unsigned int budget)
+static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int q_num,
+ const unsigned int budget, int *once_more)
{
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
- unsigned long time_limit;
unsigned int cnt;
+ int ret = 0;
- time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS);
-
- while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) {
- unsigned int rd_cnt;
- int real_cnt;
-
- rd_cnt = min(cnt, budget);
-
- real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit);
- if (real_cnt < 0)
- return real_cnt;
-
- if (real_cnt < cnt)
- return -EAGAIN;
- }
-
- return 0;
-}
+ cnt = t7xx_dpmaifq_poll_pit(rxq);
+ if (!cnt)
+ return ret;
-static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq)
-{
- struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
- int ret;
+ ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
- ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget);
- if (ret < 0) {
- /* Try one more time */
- queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- } else {
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index);
- }
+ return ret;
}
-static void t7xx_dpmaif_rxq_work(struct work_struct *work)
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
{
- struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
- struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
- int ret;
+ struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
+ struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
+ int ret, once_more = 0, work_done = 0;
atomic_set(&rxq->rx_processing, 1);
/* Ensure rx_processing is changed to 1 before actually begin RX flow */
@@ -917,22 +840,52 @@ static void t7xx_dpmaif_rxq_work(struct work_struct *work)
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
- dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
- return;
+ dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
+ return work_done;
}
- ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
- if (ret < 0 && ret != -EACCES)
- return;
+ if (!rxq->sleep_lock_pending) {
+ pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ t7xx_pci_disable_sleep(t7xx_dev);
+ }
- t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
- if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+ ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
+ if (!ret) {
+ napi_complete_done(napi, work_done);
+ rxq->sleep_lock_pending = true;
+ napi_reschedule(napi);
+ return work_done;
+ }
- t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
- pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+ rxq->sleep_lock_pending = false;
+ while (work_done < budget) {
+ int each_budget = budget - work_done;
+ int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
+ each_budget, &once_more);
+ if (rx_cnt > 0)
+ work_done += rx_cnt;
+ else
+ break;
+ }
+
+ if (once_more) {
+ napi_gro_flush(napi, false);
+ work_done = budget;
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ } else if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ }
+
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
atomic_set(&rxq->rx_processing, 0);
+
+ return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
@@ -947,7 +900,7 @@ void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int
}
rxq = &dpmaif_ctrl->rxq[qno];
- queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
+ napi_schedule(&rxq->napi);
}
static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
@@ -1082,50 +1035,14 @@ int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
int ret;
ret = t7xx_dpmaif_rx_alloc(queue);
- if (ret < 0) {
+ if (ret < 0)
dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
- return ret;
- }
-
- INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work);
-
- queue->worker = alloc_workqueue("dpmaif_rx%d_worker",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index);
- if (!queue->worker) {
- ret = -ENOMEM;
- goto err_free_rx_buffer;
- }
-
- init_waitqueue_head(&queue->rx_wq);
- skb_queue_head_init(&queue->skb_list);
- queue->skb_list_max_len = queue->bat_req->pkt_buf_sz;
- queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread,
- queue, "dpmaif_rx%d_push", queue->index);
-
- ret = PTR_ERR_OR_ZERO(queue->rx_thread);
- if (ret)
- goto err_free_workqueue;
-
- return 0;
-
-err_free_workqueue:
- destroy_workqueue(queue->worker);
-
-err_free_rx_buffer:
- t7xx_dpmaif_rx_buf_free(queue);
return ret;
}
void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
- if (queue->worker)
- destroy_workqueue(queue->worker);
-
- if (queue->rx_thread)
- kthread_stop(queue->rx_thread);
-
- skb_queue_purge(&queue->skb_list);
t7xx_dpmaif_rx_buf_free(queue);
}
@@ -1188,8 +1105,6 @@ void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
int timeout, value;
- flush_work(&rxq->dpmaif_rxq_work);
-
timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
!value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
if (timeout)
@@ -1205,7 +1120,6 @@ static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
{
int cnt, j = 0;
- flush_work(&rxq->dpmaif_rxq_work);
rxq->que_started = false;
do {
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
index 182f62dfe398..f4e1b69ad426 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
@@ -112,5 +112,6 @@ int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_b
const enum bat_type buf_type);
void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl,
struct dpmaif_bat_request *bat_req);
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget);
#endif /* __T7XX_HIF_DPMA_RX_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index f71d3bc3b237..494a28e386a3 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -22,6 +22,7 @@
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
@@ -29,6 +30,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
+#include <net/ipv6.h>
#include <net/pkt_sched.h>
#include "t7xx_hif_dpmaif_rx.h"
@@ -39,13 +41,47 @@
#include "t7xx_state_monitor.h"
#define IP_MUX_SESSION_DEFAULT 0
+#define SBD_PACKET_TYPE_MASK GENMASK(7, 4)
+
+static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ if (ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ napi_enable(ctlb->napi[i]);
+ napi_schedule(ctlb->napi[i]);
+ }
+ ctlb->is_napi_en = true;
+}
+
+static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ if (!ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ napi_synchronize(ctlb->napi[i]);
+ napi_disable(ctlb->napi[i]);
+ }
+
+ ctlb->is_napi_en = false;
+}
static int t7xx_ccmni_open(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
+ if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ccmni_ctl);
+
atomic_inc(&ccmni->usage);
return 0;
}
@@ -53,8 +89,12 @@ static int t7xx_ccmni_open(struct net_device *dev)
static int t7xx_ccmni_close(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
atomic_dec(&ccmni->usage);
+ if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ccmni_ctl);
+
netif_carrier_off(dev);
netif_tx_disable(dev);
return 0;
@@ -127,6 +167,9 @@ static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
netif_carrier_on(ccmni->dev);
}
}
+
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ctlb);
}
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
@@ -149,6 +192,9 @@ static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
struct t7xx_ccmni *ccmni;
int i;
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ctlb);
+
for (i = 0; i < ctlb->nic_dev_num; i++) {
ccmni = ctlb->ccmni_inst[i];
if (!ccmni)
@@ -161,7 +207,7 @@ static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
- dev->hard_header_len += sizeof(struct ccci_header);
+ dev->needed_headroom += sizeof(struct ccci_header);
dev->mtu = ETH_DATA_LEN;
dev->max_mtu = CCMNI_MTU_MAX;
@@ -183,6 +229,9 @@ static void t7xx_ccmni_wwan_setup(struct net_device *dev)
dev->features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features |= NETIF_F_GRO;
+
dev->needs_free_netdev = true;
dev->type = ARPHRD_NONE;
@@ -190,6 +239,34 @@ static void t7xx_ccmni_wwan_setup(struct net_device *dev)
dev->netdev_ops = &ccmni_netdev_ops;
}
+static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ /* one HW, but shared with multiple net devices,
+ * so add a dummy device for NAPI.
+ */
+ init_dummy_netdev(&ctlb->dummy_dev);
+ atomic_set(&ctlb->napi_usr_refcnt, 0);
+ ctlb->is_napi_en = false;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
+ netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
+ NIC_NAPI_POLL_BUDGET);
+ }
+}
+
+static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ netif_napi_del(ctlb->napi[i]);
+ ctlb->napi[i] = NULL;
+ }
+}
+
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
struct netlink_ext_ack *extack)
{
@@ -311,7 +388,8 @@ static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}
-static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
+static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi)
{
struct t7xx_skb_cb *skb_cb;
struct net_device *net_dev;
@@ -321,23 +399,22 @@ static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *s
skb_cb = T7XX_SKB_CB(skb);
netif_id = skb_cb->netif_idx;
- ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
+ ccmni = ccmni_ctlb->ccmni_inst[netif_id];
if (!ccmni) {
dev_kfree_skb(skb);
return;
}
net_dev = ccmni->dev;
- skb->dev = net_dev;
-
pkt_type = skb_cb->rx_pkt_type;
+ skb->dev = net_dev;
if (pkt_type == PKT_TYPE_IP6)
skb->protocol = htons(ETH_P_IPV6);
else
skb->protocol = htons(ETH_P_IP);
skb_len = skb->len;
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += skb_len;
}
@@ -404,6 +481,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
if (!ctlb->hif_ctrl)
return -ENOMEM;
+ t7xx_init_netdev_napi(ctlb);
init_md_status_notifier(t7xx_dev);
return 0;
}
@@ -419,5 +497,6 @@ void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
ctlb->wwan_is_registered = false;
}
+ t7xx_uninit_netdev_napi(ctlb);
t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}
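
[Editor's aside — not part of the patch.] The t7xx hunks above replace the RX push kthread and per-queue workqueue with NAPI polling plus GRO. As a point of reference only, here is a minimal sketch of the generic NAPI contract; the structure and helper names (demo_rxq, fetch, unmask_irq) are hypothetical stand-ins for the driver's real queue plumbing, not the t7xx code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-queue context; not the t7xx structures. */
struct demo_rxq {
	struct napi_struct napi;
	struct sk_buff *(*fetch)(struct demo_rxq *rxq);	/* pull one completed frame, or NULL */
	void (*unmask_irq)(struct demo_rxq *rxq);		/* re-enable the RX interrupt source */
};

/* poll() runs in softirq context; it must stop at @budget and only
 * re-arm the interrupt once the backlog is fully drained. */
static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	struct demo_rxq *rxq = container_of(napi, struct demo_rxq, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = rxq->fetch(rxq);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);	/* GRO aggregation, as NETIF_F_GRO advertises */
		work_done++;
	}

	if (work_done < budget && napi_complete_done(napi, work_done))
		rxq->unmask_irq(rxq);		/* unmask only when polling is really over */

	return work_done;
}

/* In the IRQ handler: leave the source masked and hand off to NAPI. */
static void demo_rx_irq(struct demo_rxq *rxq)
{
	napi_schedule(&rxq->napi);
}

Registration typically pairs netif_napi_add_weight() on a (possibly dummy) net_device with napi_enable()/napi_disable() around link-state changes, which is the pattern the t7xx_ccmni_enable_napi()/t7xx_ccmni_disable_napi() helpers in the hunks above follow.
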
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h
index f5ad49ca12a7..f5ed6f99a145 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.h
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -30,6 +30,7 @@
#define CCMNI_NETDEV_WDT_TO (1 * HZ)
#define CCMNI_MTU_MAX 3000
+#define NIC_NAPI_POLL_BUDGET 128
struct t7xx_ccmni {
u8 index;
@@ -47,6 +48,10 @@ struct t7xx_ccmni_ctrl {
unsigned int md_sta;
struct t7xx_fsm_notifier md_status_notify;
bool wwan_is_registered;
+ struct net_device dummy_dev;
+ struct napi_struct *napi[RXQ_NUM];
+ atomic_t napi_usr_refcnt;
+ bool is_napi_en;
};
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev);
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index 50b37056ce5a..112efa534eac 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -78,6 +78,9 @@ struct t7xx_pci_dev {
spinlock_t md_pm_lock; /* Protects PCI resource lock */
unsigned int sleep_disable_count;
struct completion sleep_lock_acquire;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
};
enum t7xx_pm_id {
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
index dc4133eb433a..8ea9079af997 100644
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -99,7 +99,6 @@ struct t7xx_port_conf {
struct t7xx_port {
/* Members not initialized in definition */
const struct t7xx_port_conf *port_conf;
- struct wwan_port *wwan_port;
struct t7xx_pci_dev *t7xx_dev;
struct device *dev;
u16 seq_nums[2]; /* TX/RX sequence numbers */
@@ -122,6 +121,14 @@ struct t7xx_port {
int rx_length_th;
bool chan_enable;
struct task_struct *thread;
+ union {
+ struct {
+ struct wwan_port *wwan_port;
+ } wwan;
+ struct {
+ struct rchan *relaych;
+ } log;
+ };
};
struct sk_buff *t7xx_port_alloc_skb(int payload);
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index d4de047ff0d4..894b1d11b2c9 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -70,6 +70,18 @@ static const struct t7xx_port_conf t7xx_md_port_conf[] = {
.name = "MBIM",
.port_type = WWAN_PORT_MBIM,
}, {
+#ifdef CONFIG_WWAN_DEBUGFS
+ .tx_ch = PORT_CH_MD_LOG_TX,
+ .rx_ch = PORT_CH_MD_LOG_RX,
+ .txq_index = 7,
+ .rxq_index = 7,
+ .txq_exp_index = 7,
+ .rxq_exp_index = 7,
+ .path_id = CLDMA_ID_MD,
+ .ops = &t7xx_trace_port_ops,
+ .name = "mdlog",
+ }, {
+#endif
.tx_ch = PORT_CH_CONTROL_TX,
.rx_ch = PORT_CH_CONTROL_RX,
.txq_index = Q_IDX_CTRL,
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
index bc1ff5c6c700..81d059fbc0fb 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -87,6 +87,10 @@ struct ctrl_msg_header {
extern struct port_ops wwan_sub_port_ops;
extern struct port_ops ctl_port_ops;
+#ifdef CONFIG_WWAN_DEBUGFS
+extern struct port_ops t7xx_trace_port_ops;
+#endif
+
void t7xx_port_proxy_reset(struct port_proxy *port_prox);
void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
int t7xx_port_proxy_init(struct t7xx_modem *md);
diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c
new file mode 100644
index 000000000000..6a3f36385865
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/skbuff.h>
+#include <linux/wwan.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define T7XX_TRC_SUB_BUFF_SIZE 131072
+#define T7XX_TRC_N_SUB_BUFF 32
+
+static struct dentry *t7xx_trace_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ *is_global = 1;
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
+{
+ if (relay_buf_full(buf)) {
+ pr_err_ratelimited("Relay_buf full dropping traces");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = t7xx_trace_subbuf_start_handler,
+ .create_buf_file = t7xx_trace_create_buf_file_handler,
+ .remove_buf_file = t7xx_trace_remove_buf_file_handler,
+};
+
+static void t7xx_trace_port_uninit(struct t7xx_port *port)
+{
+ struct dentry *debugfs_dir = port->t7xx_dev->debugfs_dir;
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return;
+
+ relay_close(relaych);
+ debugfs_remove_recursive(debugfs_dir);
+}
+
+static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return -EINVAL;
+
+ relay_write(relaych, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void t7xx_port_trace_md_state_notify(struct t7xx_port *port, unsigned int state)
+{
+ struct rchan *relaych = port->log.relaych;
+ struct dentry *debugfs_wwan_dir;
+ struct dentry *debugfs_dir;
+
+ if (state != MD_STATE_READY || relaych)
+ return;
+
+ debugfs_wwan_dir = wwan_get_debugfs_dir(port->dev);
+ if (IS_ERR(debugfs_wwan_dir))
+ return;
+
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, debugfs_wwan_dir);
+ if (IS_ERR_OR_NULL(debugfs_dir)) {
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create debugfs for trace");
+ return;
+ }
+
+ relaych = relay_open("relay_ch", debugfs_dir, T7XX_TRC_SUB_BUFF_SIZE,
+ T7XX_TRC_N_SUB_BUFF, &relay_callbacks, NULL);
+ if (!relaych)
+ goto err_rm_debugfs_dir;
+
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ port->log.relaych = relaych;
+ port->t7xx_dev->debugfs_dir = debugfs_dir;
+ return;
+
+err_rm_debugfs_dir:
+ debugfs_remove_recursive(debugfs_dir);
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create trace port %s", port->port_conf->name);
+}
+
+struct port_ops t7xx_trace_port_ops = {
+ .recv_skb = t7xx_trace_port_recv_skb,
+ .uninit = t7xx_trace_port_uninit,
+ .md_state_notify = t7xx_port_trace_md_state_notify,
+};
diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
index 33931bfd78fd..24bd21942403 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -109,12 +109,12 @@ static int t7xx_port_wwan_init(struct t7xx_port *port)
static void t7xx_port_wwan_uninit(struct t7xx_port *port)
{
- if (!port->wwan_port)
+ if (!port->wwan.wwan_port)
return;
port->rx_length_th = 0;
- wwan_remove_port(port->wwan_port);
- port->wwan_port = NULL;
+ wwan_remove_port(port->wwan.wwan_port);
+ port->wwan.wwan_port = NULL;
}
static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
@@ -129,7 +129,7 @@ static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
return 0;
}
- wwan_port_rx(port->wwan_port, skb);
+ wwan_port_rx(port->wwan.wwan_port, skb);
return 0;
}
@@ -158,10 +158,10 @@ static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int
if (state != MD_STATE_READY)
return;
- if (!port->wwan_port) {
- port->wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, port);
- if (IS_ERR(port->wwan_port))
+ if (!port->wwan.wwan_port) {
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, port);
+ if (IS_ERR(port->wwan.wwan_port))
dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
}
}
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 62e9f7d6c9fe..966d0ccd2276 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -319,6 +319,10 @@ static const struct {
.name = "FIREHOSE",
.devsuf = "firehose",
},
+ [WWAN_PORT_XMMRPC] = {
+ .name = "XMMRPC",
+ .devsuf = "xmmrpc",
+ },
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -1058,7 +1062,7 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
goto unlock;
}
- rtnl_configure_link(dev, NULL); /* Link initialized, notify new link */
+ rtnl_configure_link(dev, NULL, 0, NULL); /* Link initialized, notify new link */
unlock:
rtnl_unlock();
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index dc404e05970c..14aec417fa06 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1392,16 +1392,16 @@ static void xennet_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
tx_packets = tx_stats->packets;
tx_bytes = tx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
rx_packets = rx_stats->packets;
rx_bytes = rx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
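
[Editor's aside — not part of the patch.] The xennet_get_stats64() hunk above swaps the _irq variants for plain u64_stats_fetch_begin()/u64_stats_fetch_retry(). For context, a generic sketch of the writer/reader pairing this API expects follows; the structure and function names are made up for illustration.

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side (e.g. per-CPU in the datapath). */
static void demo_stats_add(struct demo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent snapshot is observed. */
static void demo_stats_read(const struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
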
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 5eaa18f81355..e72b358a2a12 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -231,8 +231,7 @@ static const struct nfc_phy_ops i2c_phy_ops = {
.disable = microread_i2c_disable,
};
-static int microread_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int microread_i2c_probe(struct i2c_client *client)
{
struct microread_i2c_phy *phy;
int r;
@@ -287,7 +286,7 @@ static struct i2c_driver microread_i2c_driver = {
.driver = {
.name = MICROREAD_I2C_DRIVER_NAME,
},
- .probe = microread_i2c_probe,
+ .probe_new = microread_i2c_probe,
.remove = microread_i2c_remove,
.id_table = microread_i2c_id,
};
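
[Editor's aside — not part of the patch.] This and the following NFC hunks convert i2c drivers from the two-argument .probe callback to the single-argument .probe_new. A minimal sketch of the converted shape is below; the demo names are hypothetical, and a driver that still needs the matched ID can look it up with i2c_client_get_device_id() instead of taking it as a parameter.

#include <linux/i2c.h>
#include <linux/module.h>

static int demo_i2c_probe(struct i2c_client *client)
{
	/* Optional: recover the matched ID without the second probe argument. */
	const struct i2c_device_id *id = i2c_client_get_device_id(client);

	dev_info(&client->dev, "probed as %s\n", id ? id->name : "unknown");
	return 0;
}

static const struct i2c_device_id demo_i2c_ids[] = {
	{ "demo-dev" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_i2c_ids);

static struct i2c_driver demo_i2c_driver = {
	.driver = {
		.name = "demo-dev",
	},
	.probe_new = demo_i2c_probe,
	.id_table = demo_i2c_ids,
};
module_i2c_driver(demo_i2c_driver);

MODULE_LICENSE("GPL");
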
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 97600826af69..e74342b0b728 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -183,8 +183,7 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
return 0;
}
-static int nfcmrvl_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int nfcmrvl_i2c_probe(struct i2c_client *client)
{
const struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_i2c_drv_data *drv_data;
@@ -259,7 +258,7 @@ static const struct i2c_device_id nfcmrvl_i2c_id_table[] = {
MODULE_DEVICE_TABLE(i2c, nfcmrvl_i2c_id_table);
static struct i2c_driver nfcmrvl_i2c_driver = {
- .probe = nfcmrvl_i2c_probe,
+ .probe_new = nfcmrvl_i2c_probe,
.id_table = nfcmrvl_i2c_id_table,
.remove = nfcmrvl_i2c_remove,
.driver = {
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index ec6446511984..d4c299be7949 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -263,8 +263,7 @@ static const struct acpi_gpio_mapping acpi_nxp_nci_gpios[] = {
{ }
};
-static int nxp_nci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int nxp_nci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct nxp_nci_i2c_phy *phy;
@@ -349,7 +348,7 @@ static struct i2c_driver nxp_nci_i2c_driver = {
.acpi_match_table = ACPI_PTR(acpi_id),
.of_match_table = of_nxp_nci_i2c_match,
},
- .probe = nxp_nci_i2c_probe,
+ .probe_new = nxp_nci_i2c_probe,
.id_table = nxp_nci_i2c_id_table,
.remove = nxp_nci_i2c_remove,
};
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index ddf3db286bad..1503a98f0405 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -163,8 +163,7 @@ static const struct pn533_phy_ops i2c_phy_ops = {
};
-static int pn533_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pn533_i2c_probe(struct i2c_client *client)
{
struct pn533_i2c_phy *phy;
struct pn533 *priv;
@@ -260,7 +259,7 @@ static struct i2c_driver pn533_i2c_driver = {
.name = PN533_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_pn533_i2c_match),
},
- .probe = pn533_i2c_probe,
+ .probe_new = pn533_i2c_probe,
.id_table = pn533_i2c_id_table,
.remove = pn533_i2c_remove,
};
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 9e754abcfa2a..8b0d910bee06 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -866,8 +866,7 @@ static const struct acpi_gpio_mapping acpi_pn544_gpios[] = {
{ },
};
-static int pn544_hci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pn544_hci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct pn544_i2c_phy *phy;
@@ -954,7 +953,7 @@ static struct i2c_driver pn544_hci_i2c_driver = {
.of_match_table = of_match_ptr(of_pn544_i2c_match),
.acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match),
},
- .probe = pn544_hci_i2c_probe,
+ .probe_new = pn544_hci_i2c_probe,
.id_table = pn544_hci_i2c_id_table,
.remove = pn544_hci_i2c_remove,
};
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index f824dc7099ce..2517ae71f9a4 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -177,8 +177,7 @@ static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
return 0;
}
-static int s3fwrn5_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s3fwrn5_i2c_probe(struct i2c_client *client)
{
struct s3fwrn5_i2c_phy *phy;
int ret;
@@ -209,27 +208,21 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- phy->clk = devm_clk_get_optional(&client->dev, NULL);
- if (IS_ERR(phy->clk))
- return dev_err_probe(&client->dev, PTR_ERR(phy->clk),
- "failed to get clock\n");
-
/*
* S3FWRN5 depends on a clock input ("XI" pin) to function properly.
* Depending on the hardware configuration this could be an always-on
* oscillator or some external clock that must be explicitly enabled.
* Make sure the clock is running before starting S3FWRN5.
*/
- ret = clk_prepare_enable(phy->clk);
- if (ret < 0) {
- dev_err(&client->dev, "failed to enable clock: %d\n", ret);
- return ret;
- }
+ phy->clk = devm_clk_get_optional_enabled(&client->dev, NULL);
+ if (IS_ERR(phy->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(phy->clk),
+ "failed to get clock\n");
ret = s3fwrn5_probe(&phy->common.ndev, phy, &phy->i2c_dev->dev,
&i2c_phy_ops);
if (ret < 0)
- goto disable_clk;
+ return ret;
ret = devm_request_threaded_irq(&client->dev, phy->i2c_dev->irq, NULL,
s3fwrn5_i2c_irq_thread_fn, IRQF_ONESHOT,
@@ -241,8 +234,6 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
s3fwrn5_remove:
s3fwrn5_remove(phy->common.ndev);
-disable_clk:
- clk_disable_unprepare(phy->clk);
return ret;
}
@@ -251,7 +242,6 @@ static void s3fwrn5_i2c_remove(struct i2c_client *client)
struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
s3fwrn5_remove(phy->common.ndev);
- clk_disable_unprepare(phy->clk);
}
static const struct i2c_device_id s3fwrn5_i2c_id_table[] = {
@@ -271,7 +261,7 @@ static struct i2c_driver s3fwrn5_i2c_driver = {
.name = S3FWRN5_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
},
- .probe = s3fwrn5_i2c_probe,
+ .probe_new = s3fwrn5_i2c_probe,
.remove = s3fwrn5_i2c_remove,
.id_table = s3fwrn5_i2c_id_table,
};
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 89fa24d71bef..6b5eed8a1fbe 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -195,8 +195,7 @@ static const struct acpi_gpio_mapping acpi_st_nci_gpios[] = {
{},
};
-static int st_nci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st_nci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct st_nci_i2c_phy *phy;
@@ -284,7 +283,7 @@ static struct i2c_driver st_nci_i2c_driver = {
.of_match_table = of_match_ptr(of_st_nci_i2c_match),
.acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match),
},
- .probe = st_nci_i2c_probe,
+ .probe_new = st_nci_i2c_probe,
.id_table = st_nci_i2c_id_table,
.remove = st_nci_i2c_remove,
};
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 76b55986bcf8..55f7a2391bb1 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -487,8 +487,7 @@ static const struct acpi_gpio_mapping acpi_st21nfca_gpios[] = {
{},
};
-static int st21nfca_hci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st21nfca_hci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct st21nfca_i2c_phy *phy;
@@ -598,7 +597,7 @@ static struct i2c_driver st21nfca_hci_i2c_driver = {
.of_match_table = of_match_ptr(of_st21nfca_i2c_match),
.acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match),
},
- .probe = st21nfca_hci_i2c_probe,
+ .probe_new = st21nfca_hci_i2c_probe,
.id_table = st21nfca_hci_i2c_id_table,
.remove = st21nfca_hci_i2c_remove,
};
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index 85c06dbb2c44..bb76c7c7cc82 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -13,12 +13,6 @@
#include <linux/wait.h>
#include <net/nfc/nci_core.h>
-enum virtual_ncidev_mode {
- virtual_ncidev_enabled,
- virtual_ncidev_disabled,
- virtual_ncidev_disabling,
-};
-
#define IOCTL_GET_NCIDEV_IDX 0
#define VIRTUAL_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
NFC_PROTO_MIFARE_MASK | \
@@ -27,12 +21,12 @@ enum virtual_ncidev_mode {
NFC_PROTO_ISO14443_B_MASK | \
NFC_PROTO_ISO15693_MASK)
-static enum virtual_ncidev_mode state;
-static DECLARE_WAIT_QUEUE_HEAD(wq);
-static struct miscdevice miscdev;
-static struct sk_buff *send_buff;
-static struct nci_dev *ndev;
-static DEFINE_MUTEX(nci_mutex);
+struct virtual_nci_dev {
+ struct nci_dev *ndev;
+ struct mutex mtx;
+ struct sk_buff *send_buff;
+ struct wait_queue_head wq;
+};
static int virtual_nci_open(struct nci_dev *ndev)
{
@@ -41,31 +35,34 @@ static int virtual_nci_open(struct nci_dev *ndev)
static int virtual_nci_close(struct nci_dev *ndev)
{
- mutex_lock(&nci_mutex);
- kfree_skb(send_buff);
- send_buff = NULL;
- mutex_unlock(&nci_mutex);
+ struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
+
+ mutex_lock(&vdev->mtx);
+ kfree_skb(vdev->send_buff);
+ vdev->send_buff = NULL;
+ mutex_unlock(&vdev->mtx);
return 0;
}
static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
- mutex_lock(&nci_mutex);
- if (state != virtual_ncidev_enabled) {
- mutex_unlock(&nci_mutex);
+ struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
+
+ mutex_lock(&vdev->mtx);
+ if (vdev->send_buff) {
+ mutex_unlock(&vdev->mtx);
kfree_skb(skb);
- return 0;
+ return -1;
}
-
- if (send_buff) {
- mutex_unlock(&nci_mutex);
+ vdev->send_buff = skb_copy(skb, GFP_KERNEL);
+ if (!vdev->send_buff) {
+ mutex_unlock(&vdev->mtx);
kfree_skb(skb);
return -1;
}
- send_buff = skb_copy(skb, GFP_KERNEL);
- mutex_unlock(&nci_mutex);
- wake_up_interruptible(&wq);
+ mutex_unlock(&vdev->mtx);
+ wake_up_interruptible(&vdev->wq);
consume_skb(skb);
return 0;
@@ -80,29 +77,30 @@ static const struct nci_ops virtual_nci_ops = {
static ssize_t virtual_ncidev_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct virtual_nci_dev *vdev = file->private_data;
size_t actual_len;
- mutex_lock(&nci_mutex);
- while (!send_buff) {
- mutex_unlock(&nci_mutex);
- if (wait_event_interruptible(wq, send_buff))
+ mutex_lock(&vdev->mtx);
+ while (!vdev->send_buff) {
+ mutex_unlock(&vdev->mtx);
+ if (wait_event_interruptible(vdev->wq, vdev->send_buff))
return -EFAULT;
- mutex_lock(&nci_mutex);
+ mutex_lock(&vdev->mtx);
}
- actual_len = min_t(size_t, count, send_buff->len);
+ actual_len = min_t(size_t, count, vdev->send_buff->len);
- if (copy_to_user(buf, send_buff->data, actual_len)) {
- mutex_unlock(&nci_mutex);
+ if (copy_to_user(buf, vdev->send_buff->data, actual_len)) {
+ mutex_unlock(&vdev->mtx);
return -EFAULT;
}
- skb_pull(send_buff, actual_len);
- if (send_buff->len == 0) {
- consume_skb(send_buff);
- send_buff = NULL;
+ skb_pull(vdev->send_buff, actual_len);
+ if (vdev->send_buff->len == 0) {
+ consume_skb(vdev->send_buff);
+ vdev->send_buff = NULL;
}
- mutex_unlock(&nci_mutex);
+ mutex_unlock(&vdev->mtx);
return actual_len;
}
@@ -111,6 +109,7 @@ static ssize_t virtual_ncidev_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct virtual_nci_dev *vdev = file->private_data;
struct sk_buff *skb;
skb = alloc_skb(count, GFP_KERNEL);
@@ -122,63 +121,58 @@ static ssize_t virtual_ncidev_write(struct file *file,
return -EFAULT;
}
- nci_recv_frame(ndev, skb);
+ nci_recv_frame(vdev->ndev, skb);
return count;
}
static int virtual_ncidev_open(struct inode *inode, struct file *file)
{
int ret = 0;
+ struct virtual_nci_dev *vdev;
- mutex_lock(&nci_mutex);
- if (state != virtual_ncidev_disabled) {
- mutex_unlock(&nci_mutex);
- return -EBUSY;
- }
-
- ndev = nci_allocate_device(&virtual_nci_ops, VIRTUAL_NFC_PROTOCOLS,
- 0, 0);
- if (!ndev) {
- mutex_unlock(&nci_mutex);
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return -ENOMEM;
+ vdev->ndev = nci_allocate_device(&virtual_nci_ops,
+ VIRTUAL_NFC_PROTOCOLS, 0, 0);
+ if (!vdev->ndev) {
+ kfree(vdev);
return -ENOMEM;
}
- ret = nci_register_device(ndev);
+ mutex_init(&vdev->mtx);
+ init_waitqueue_head(&vdev->wq);
+ file->private_data = vdev;
+ nci_set_drvdata(vdev->ndev, vdev);
+
+ ret = nci_register_device(vdev->ndev);
if (ret < 0) {
- nci_free_device(ndev);
- mutex_unlock(&nci_mutex);
+ nci_free_device(vdev->ndev);
+ mutex_destroy(&vdev->mtx);
+ kfree(vdev);
return ret;
}
- state = virtual_ncidev_enabled;
- mutex_unlock(&nci_mutex);
return 0;
}
static int virtual_ncidev_close(struct inode *inode, struct file *file)
{
- mutex_lock(&nci_mutex);
-
- if (state == virtual_ncidev_enabled) {
- state = virtual_ncidev_disabling;
- mutex_unlock(&nci_mutex);
+ struct virtual_nci_dev *vdev = file->private_data;
- nci_unregister_device(ndev);
- nci_free_device(ndev);
-
- mutex_lock(&nci_mutex);
- }
-
- state = virtual_ncidev_disabled;
- mutex_unlock(&nci_mutex);
+ nci_unregister_device(vdev->ndev);
+ nci_free_device(vdev->ndev);
+ mutex_destroy(&vdev->mtx);
+ kfree(vdev);
return 0;
}
-static long virtual_ncidev_ioctl(struct file *flip, unsigned int cmd,
+static long virtual_ncidev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- const struct nfc_dev *nfc_dev = ndev->nfc_dev;
+ struct virtual_nci_dev *vdev = file->private_data;
+ const struct nfc_dev *nfc_dev = vdev->ndev->nfc_dev;
void __user *p = (void __user *)arg;
if (cmd != IOCTL_GET_NCIDEV_IDX)
@@ -199,14 +193,15 @@ static const struct file_operations virtual_ncidev_fops = {
.unlocked_ioctl = virtual_ncidev_ioctl
};
+static struct miscdevice miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "virtual_nci",
+ .fops = &virtual_ncidev_fops,
+ .mode = 0600,
+};
+
static int __init virtual_ncidev_init(void)
{
- state = virtual_ncidev_disabled;
- miscdev.minor = MISC_DYNAMIC_MINOR;
- miscdev.name = "virtual_nci";
- miscdev.fops = &virtual_ncidev_fops;
- miscdev.mode = 0600;
-
return misc_register(&miscdev);
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 51cae72bb6db..62d4d29e7c05 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -131,10 +131,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
long ppb = scaled_ppm_to_ppb(tx->freq);
if (ppb > ops->max_adj || ppb < -ops->max_adj)
return -ERANGE;
- if (ops->adjfine)
- err = ops->adjfine(ops, tx->freq);
- else
- err = ops->adjfreq(ops, ppb);
+ err = ops->adjfine(ops, tx->freq);
ptp->dialed_frequency = tx->freq;
} else if (tx->modes & ADJ_OFFSET) {
if (ops->adjphase) {
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 8641fd060491..7cc5a00e625b 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -134,8 +134,9 @@ static s64 dte_read_nco_with_ovf(struct ptp_dte *ptp_dte)
return ns;
}
-static int ptp_dte_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int ptp_dte_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
u32 nco_incr;
unsigned long flags;
struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
@@ -219,7 +220,7 @@ static const struct ptp_clock_info ptp_dte_caps = {
.n_ext_ts = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = ptp_dte_adjfreq,
+ .adjfine = ptp_dte_adjfine,
.adjtime = ptp_dte_adjtime,
.gettime64 = ptp_dte_gettime,
.settime64 = ptp_dte_settime,
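
[Editor's aside — not part of the patch.] The ptp_dte hunk above replaces the legacy .adjfreq callback with .adjfine. The scaled_ppm argument is parts-per-million with a 16-bit fractional part, so ppb = scaled_ppm * 1000 / 2^16, which is exactly what scaled_ppm_to_ppb() computes. A minimal hedged sketch of an adjfine callback follows; the demo_phc structure is hypothetical, not the DTE driver's state.

#include <linux/ptp_clock_kernel.h>

/* Hypothetical clock state; a real driver programs hardware registers here. */
struct demo_phc {
	struct ptp_clock_info caps;
	s64 freq_offset_ppb;
};

/* scaled_ppm is ppm with 16 fractional bits, so 1 ppb == 65.536 scaled_ppm units. */
static int demo_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct demo_phc *phc = container_of(ptp, struct demo_phc, caps);

	phc->freq_offset_ppb = scaled_ppm_to_ppb(scaled_ppm);
	/* ...apply phc->freq_offset_ppb to the hardware rate control... */
	return 0;
}
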
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index 97c1be44e323..afc76c22271a 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -27,6 +27,8 @@ MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILENAME);
+#define EXTTS_PERIOD_MS (95)
+
/* Module Parameters */
static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
module_param(phase_snap_threshold, uint, 0);
@@ -36,6 +38,8 @@ MODULE_PARM_DESC(phase_snap_threshold,
static char *firmware;
module_param(firmware, charp, 0);
+static struct ptp_pin_desc pin_config[MAX_PHC_PLL][MAX_TRIG_CLK];
+
static inline int idt82p33_read(struct idt82p33 *idt82p33, u16 regaddr,
u8 *buf, u16 count)
{
@@ -121,24 +125,270 @@ static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
return 0;
}
-static int _idt82p33_gettime(struct idt82p33_channel *channel,
- struct timespec64 *ts)
+static int idt82p33_set_tod_trigger(struct idt82p33_channel *channel,
+ u8 trigger, bool write)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+ u8 cfg;
+
+ if (trigger > WR_TRIG_SEL_MAX)
+ return -EINVAL;
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_trigger,
+ &cfg, sizeof(cfg));
+
+ if (err)
+ return err;
+
+ if (write == true)
+ trigger = (trigger << WRITE_TRIGGER_SHIFT) |
+ (cfg & READ_TRIGGER_MASK);
+ else
+ trigger = (trigger << READ_TRIGGER_SHIFT) |
+ (cfg & WRITE_TRIGGER_MASK);
+
+ return idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+}
+
+static int idt82p33_get_extts(struct idt82p33_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 buf[TOD_BYTE_COUNT];
+ int err;
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ /* Since trigger is not self clearing itself, we have to poll tod_sts */
+ if (memcmp(buf, channel->extts_tod_sts, TOD_BYTE_COUNT) == 0)
+ return -EAGAIN;
+
+ memcpy(channel->extts_tod_sts, buf, TOD_BYTE_COUNT);
+
+ idt82p33_byte_array_to_timespec(ts, buf);
+
+ if (channel->discard_next_extts) {
+ channel->discard_next_extts = false;
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int map_ref_to_tod_trig_sel(int ref, u8 *trigger)
+{
+ int err = 0;
+
+ switch (ref) {
+ case 0:
+ *trigger = HW_TOD_TRIG_SEL_IN12;
+ break;
+ case 1:
+ *trigger = HW_TOD_TRIG_SEL_IN13;
+ break;
+ case 2:
+ *trigger = HW_TOD_TRIG_SEL_IN14;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static bool is_one_shot(u8 mask)
+{
+ /* Treat single bit PLL masks as continuous trigger */
+ if ((mask == 1) || (mask == 2))
+ return false;
+ else
+ return true;
+}
+
+static int arm_tod_read_with_trigger(struct idt82p33_channel *channel, u8 trigger)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 buf[TOD_BYTE_COUNT];
+ int err;
+
+ /* Remember the current tod_sts before setting the trigger */
+ err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ memcpy(channel->extts_tod_sts, buf, TOD_BYTE_COUNT);
+
+ err = idt82p33_set_tod_trigger(channel, trigger, false);
+
+ if (err)
+ dev_err(idt82p33->dev, "%s: err = %d", __func__, err);
+
+ return err;
+}
+
+static int idt82p33_extts_enable(struct idt82p33_channel *channel,
+ struct ptp_clock_request *rq, int on)
+{
+ u8 index = rq->extts.index;
+ struct idt82p33 *idt82p33;
+ u8 mask = 1 << index;
+ int err = 0;
+ u8 old_mask;
u8 trigger;
+ int ref;
+
+ idt82p33 = channel->idt82p33;
+ old_mask = idt82p33->extts_mask;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on falling edge */
+ if ((rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_FALLING_EDGE))
+ return -EOPNOTSUPP;
+
+ if (index >= MAX_PHC_PLL)
+ return -EINVAL;
+
+ if (on) {
+ /* Return if it was already enabled */
+ if (idt82p33->extts_mask & mask)
+ return 0;
+
+ /* Use the pin configured for the channel */
+ ref = ptp_find_pin(channel->ptp_clock, PTP_PF_EXTTS, channel->plln);
+
+ if (ref < 0) {
+ dev_err(idt82p33->dev, "%s: No valid pin found for Pll%d!\n",
+ __func__, channel->plln);
+ return -EBUSY;
+ }
+
+ err = map_ref_to_tod_trig_sel(ref, &trigger);
+
+ if (err) {
+ dev_err(idt82p33->dev,
+ "%s: Unsupported ref %d!\n", __func__, ref);
+ return err;
+ }
+
+ err = arm_tod_read_with_trigger(&idt82p33->channel[index], trigger);
+
+ if (err == 0) {
+ idt82p33->extts_mask |= mask;
+ idt82p33->channel[index].tod_trigger = trigger;
+ idt82p33->event_channel[index] = channel;
+ idt82p33->extts_single_shot = is_one_shot(idt82p33->extts_mask);
+
+ if (old_mask)
+ return 0;
+
+ schedule_delayed_work(&idt82p33->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+ }
+ } else {
+ idt82p33->extts_mask &= ~mask;
+ idt82p33->extts_single_shot = is_one_shot(idt82p33->extts_mask);
+
+ if (idt82p33->extts_mask == 0)
+ cancel_delayed_work(&idt82p33->extts_work);
+ }
+
+ return err;
+}
+
+static int idt82p33_extts_check_channel(struct idt82p33 *idt82p33, u8 todn)
+{
+ struct idt82p33_channel *event_channel;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ int err;
+
+ err = idt82p33_get_extts(&idt82p33->channel[todn], &ts);
+ if (err == 0) {
+ event_channel = idt82p33->event_channel[todn];
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = todn;
+ event.timestamp = timespec64_to_ns(&ts);
+ ptp_clock_event(event_channel->ptp_clock,
+ &event);
+ }
+ return err;
+}
+
+static u8 idt82p33_extts_enable_mask(struct idt82p33_channel *channel,
+ u8 extts_mask, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 trigger = channel->tod_trigger;
+ u8 mask;
int err;
+ int i;
- trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
- HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+ if (extts_mask == 0)
+ return 0;
+ if (enable == false)
+ cancel_delayed_work_sync(&idt82p33->extts_work);
- err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
- &trigger, sizeof(trigger));
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if ((extts_mask & mask) == 0)
+ continue;
+ if (enable) {
+ err = arm_tod_read_with_trigger(&idt82p33->channel[i], trigger);
+ if (err)
+ dev_err(idt82p33->dev,
+ "%s: Arm ToD read trigger failed, err = %d",
+ __func__, err);
+ } else {
+ err = idt82p33_extts_check_channel(idt82p33, i);
+ if (err == 0 && idt82p33->extts_single_shot)
+ /* trigger happened so we won't re-enable it */
+ extts_mask &= ~mask;
+ }
+ }
+
+ if (enable)
+ schedule_delayed_work(&idt82p33->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+
+ return extts_mask;
+}
+
+static int _idt82p33_gettime(struct idt82p33_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 old_mask = idt82p33->extts_mask;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 new_mask = 0;
+ int err;
+
+ /* Disable extts */
+ if (old_mask)
+ new_mask = idt82p33_extts_enable_mask(channel, old_mask, false);
+
+ err = idt82p33_set_tod_trigger(channel, HW_TOD_RD_TRIG_SEL_LSB_TOD_STS,
+ false);
if (err)
return err;
+ channel->discard_next_extts = true;
+
if (idt82p33->calculate_overhead_flag)
idt82p33->start_time = ktime_get_raw();
@@ -147,6 +397,10 @@ static int _idt82p33_gettime(struct idt82p33_channel *channel,
if (err)
return err;
+ /* Re-enable extts */
+ if (new_mask)
+ idt82p33_extts_enable_mask(channel, new_mask, true);
+
idt82p33_byte_array_to_timespec(ts, buf);
return 0;
@@ -165,19 +419,16 @@ static int _idt82p33_settime(struct idt82p33_channel *channel,
struct timespec64 local_ts = *ts;
char buf[TOD_BYTE_COUNT];
s64 dynamic_overhead_ns;
- unsigned char trigger;
int err;
u8 i;
- trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
- HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
-
- err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
- &trigger, sizeof(trigger));
-
+ err = idt82p33_set_tod_trigger(channel, HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ true);
if (err)
return err;
+ channel->discard_next_extts = true;
+
if (idt82p33->calculate_overhead_flag) {
dynamic_overhead_ns = ktime_to_ns(ktime_get_raw())
- ktime_to_ns(idt82p33->start_time);
@@ -202,7 +453,8 @@ static int _idt82p33_settime(struct idt82p33_channel *channel,
return err;
}
-static int _idt82p33_adjtime(struct idt82p33_channel *channel, s64 delta_ns)
+static int _idt82p33_adjtime_immediate(struct idt82p33_channel *channel,
+ s64 delta_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
struct timespec64 ts;
@@ -226,6 +478,60 @@ static int _idt82p33_adjtime(struct idt82p33_channel *channel, s64 delta_ns)
return err;
}
+static int _idt82p33_adjtime_internal_triggered(struct idt82p33_channel *channel,
+ s64 delta_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ char buf[TOD_BYTE_COUNT];
+ struct timespec64 ts;
+ const u8 delay_ns = 32;
+ s32 remainder;
+ s64 ns;
+ int err;
+
+ err = _idt82p33_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ if (ts.tv_nsec > (NSEC_PER_SEC - 5 * NSEC_PER_MSEC)) {
+ /* Too close to miss next trigger, so skip it */
+ mdelay(6);
+ ns = (ts.tv_sec + 2) * NSEC_PER_SEC + delta_ns + delay_ns;
+ } else
+ ns = (ts.tv_sec + 1) * NSEC_PER_SEC + delta_ns + delay_ns;
+
+ ts = ns_to_timespec64(ns);
+ idt82p33_timespec_to_byte_array(&ts, buf);
+
+ /*
+ * Store the new time value.
+ */
+ err = idt82p33_write(idt82p33, channel->dpll_tod_cnfg, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ /* Schedule to implement the workaround in one second */
+ (void)div_s64_rem(delta_ns, NSEC_PER_SEC, &remainder);
+ if (remainder != 0)
+ schedule_delayed_work(&channel->adjtime_work, HZ);
+
+ return idt82p33_set_tod_trigger(channel, HW_TOD_TRIG_SEL_TOD_PPS, true);
+}
+
+static void idt82p33_adjtime_workaround(struct work_struct *work)
+{
+ struct idt82p33_channel *channel = container_of(work,
+ struct idt82p33_channel,
+ adjtime_work.work);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+
+ mutex_lock(idt82p33->lock);
+ /* Workaround for TOD-to-output alignment issue */
+ _idt82p33_adjtime_internal_triggered(channel, 0);
+ mutex_unlock(idt82p33->lock);
+}
+
static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
@@ -233,25 +539,22 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
int err, i;
s64 fcw;
- if (scaled_ppm == channel->current_freq_ppb)
- return 0;
-
/*
- * Frequency Control Word unit is: 1.68 * 10^-10 ppm
+ * Frequency Control Word unit is: 1.6861512 * 10^-10 ppm
*
* adjfreq:
- * ppb * 10^9
- * FCW = ----------
- * 168
+ * ppb * 10^14
+ * FCW = -----------
+ * 16861512
*
* adjfine:
- * scaled_ppm * 5^12
- * FCW = -------------
- * 168 * 2^4
+ * scaled_ppm * 5^12 * 10^5
+ * FCW = ------------------------
+ * 16861512 * 2^4
*/
- fcw = scaled_ppm * 244140625ULL;
- fcw = div_s64(fcw, 2688);
+ fcw = scaled_ppm * 762939453125ULL;
+ fcw = div_s64(fcw, 8430756LL);
for (i = 0; i < 5; i++) {
buf[i] = fcw & 0xff;
@@ -266,26 +569,84 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
err = idt82p33_write(idt82p33, channel->dpll_freq_cnfg,
buf, sizeof(buf));
- if (err == 0)
- channel->current_freq_ppb = scaled_ppm;
-
return err;
}
+/* ppb = scaled_ppm * 125 / 2^13 */
+static s32 idt82p33_ddco_scaled_ppm(long current_ppm, s32 ddco_ppb)
+{
+ s64 scaled_ppm = div_s64(((s64)ddco_ppb << 13), 125);
+ s64 max_scaled_ppm = div_s64(((s64)DCO_MAX_PPB << 13), 125);
+
+ current_ppm += scaled_ppm;
+
+ if (current_ppm > max_scaled_ppm)
+ current_ppm = max_scaled_ppm;
+ else if (current_ppm < -max_scaled_ppm)
+ current_ppm = -max_scaled_ppm;
+
+ return (s32)current_ppm;
+}
+
+static int idt82p33_stop_ddco(struct idt82p33_channel *channel)
+{
+ int err;
+
+ err = _idt82p33_adjfine(channel, channel->current_freq);
+ if (err)
+ return err;
+
+ channel->ddco = false;
+
+ return 0;
+}
+
+static int idt82p33_start_ddco(struct idt82p33_channel *channel, s32 delta_ns)
+{
+ s32 current_ppm = channel->current_freq;
+ u32 duration_ms = MSEC_PER_SEC;
+ s32 ppb;
+ int err;
+
+ /* If the ToD correction is less than 5 nanoseconds, then skip it.
+ * The error introduced by the ToD adjustment procedure would be bigger
+ * than the required ToD correction
+ */
+ if (abs(delta_ns) < DDCO_THRESHOLD_NS)
+ return 0;
+
+ /* For most cases, keep ddco duration 1 second */
+ ppb = delta_ns;
+ while (abs(ppb) > DCO_MAX_PPB) {
+ duration_ms *= 2;
+ ppb /= 2;
+ }
+
+ err = _idt82p33_adjfine(channel,
+ idt82p33_ddco_scaled_ppm(current_ppm, ppb));
+ if (err)
+ return err;
+
+ /* schedule the worker to cancel ddco */
+ ptp_schedule_worker(channel->ptp_clock,
+ msecs_to_jiffies(duration_ms) - 1);
+ channel->ddco = true;
+
+ return 0;
+}
+
static int idt82p33_measure_one_byte_write_overhead(
struct idt82p33_channel *channel, s64 *overhead_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
ktime_t start, stop;
+ u8 trigger = 0;
s64 total_ns;
- u8 trigger;
int err;
u8 i;
total_ns = 0;
*overhead_ns = 0;
- trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
- HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
@@ -307,8 +668,41 @@ static int idt82p33_measure_one_byte_write_overhead(
return err;
}
+static int idt82p33_measure_one_byte_read_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ ktime_t start, stop;
+ u8 trigger = 0;
+ s64 total_ns;
+ int err;
+ u8 i;
+
+ total_ns = 0;
+ *overhead_ns = 0;
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ stop = ktime_get_raw();
+
+ if (err)
+ return err;
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ *overhead_ns = div_s64(total_ns, MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
static int idt82p33_measure_tod_write_9_byte_overhead(
- struct idt82p33_channel *channel)
+ struct idt82p33_channel *channel)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 buf[TOD_BYTE_COUNT];
@@ -368,7 +762,7 @@ static int idt82p33_measure_settime_gettime_gap_overhead(
static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
{
- s64 trailing_overhead_ns, one_byte_write_ns, gap_ns;
+ s64 trailing_overhead_ns, one_byte_write_ns, gap_ns, one_byte_read_ns;
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
@@ -388,12 +782,19 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
if (err)
return err;
+ err = idt82p33_measure_one_byte_read_overhead(channel,
+ &one_byte_read_ns);
+
+ if (err)
+ return err;
+
err = idt82p33_measure_tod_write_9_byte_overhead(channel);
if (err)
return err;
- trailing_overhead_ns = gap_ns - (2 * one_byte_write_ns);
+ trailing_overhead_ns = gap_ns - 2 * one_byte_write_ns
+ - one_byte_read_ns;
idt82p33->tod_write_overhead_ns -= trailing_overhead_ns;
@@ -462,6 +863,20 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
&sync_cnfg, sizeof(sync_cnfg));
}
+static long idt82p33_work_handler(struct ptp_clock_info *ptp)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+
+ mutex_lock(idt82p33->lock);
+ (void)idt82p33_stop_ddco(channel);
+ mutex_unlock(idt82p33->lock);
+
+ /* Return a negative value here to not reschedule */
+ return -1;
+}
+
static int idt82p33_output_enable(struct idt82p33_channel *channel,
bool enable, unsigned int outn)
{
@@ -480,40 +895,10 @@ static int idt82p33_output_enable(struct idt82p33_channel *channel,
return idt82p33_write(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
}
-static int idt82p33_output_mask_enable(struct idt82p33_channel *channel,
- bool enable)
-{
- u16 mask;
- int err;
- u8 outn;
-
- mask = channel->output_mask;
- outn = 0;
-
- while (mask) {
- if (mask & 0x1) {
- err = idt82p33_output_enable(channel, enable, outn);
- if (err)
- return err;
- }
-
- mask >>= 0x1;
- outn++;
- }
-
- return 0;
-}
-
static int idt82p33_perout_enable(struct idt82p33_channel *channel,
bool enable,
struct ptp_perout_request *perout)
{
- unsigned int flags = perout->flags;
-
- /* Enable/disable output based on output_mask */
- if (flags == PEROUT_ENABLE_OUTPUT_MASK)
- return idt82p33_output_mask_enable(channel, enable);
-
/* Enable/disable individual output instead */
return idt82p33_output_enable(channel, enable, perout->index);
}
@@ -546,14 +931,15 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
u8 i;
for (i = 0; i < MAX_PHC_PLL; i++) {
-
channel = &idt82p33->channel[i];
-
+ cancel_delayed_work_sync(&channel->adjtime_work);
if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
}
}
+
+
static int idt82p33_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -564,7 +950,8 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
mutex_lock(idt82p33->lock);
- if (rq->type == PTP_CLK_REQ_PEROUT) {
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
if (!on)
err = idt82p33_perout_enable(channel, false,
&rq->perout);
@@ -575,6 +962,12 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
else
err = idt82p33_perout_enable(channel, true,
&rq->perout);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ err = idt82p33_extts_enable(channel, rq, on);
+ break;
+ default:
+ break;
}
mutex_unlock(idt82p33->lock);
@@ -634,13 +1027,22 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
+ if (channel->ddco == true)
+ return 0;
+
+ if (scaled_ppm == channel->current_freq)
+ return 0;
+
mutex_lock(idt82p33->lock);
err = _idt82p33_adjfine(channel, scaled_ppm);
+
+ if (err == 0)
+ channel->current_freq = scaled_ppm;
mutex_unlock(idt82p33->lock);
+
if (err)
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
-
return err;
}
@@ -651,14 +1053,21 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
+ if (channel->ddco == true)
+ return -EBUSY;
+
mutex_lock(idt82p33->lock);
if (abs(delta_ns) < phase_snap_threshold) {
+ err = idt82p33_start_ddco(channel, delta_ns);
mutex_unlock(idt82p33->lock);
- return 0;
+ return err;
}
- err = _idt82p33_adjtime(channel, delta_ns);
+ /* Use more accurate internal 1pps triggered write first */
+ err = _idt82p33_adjtime_internal_triggered(channel, delta_ns);
+ if (err && delta_ns > IMMEDIATE_SNAP_THRESHOLD_NS)
+ err = _idt82p33_adjtime_immediate(channel, delta_ns);
mutex_unlock(idt82p33->lock);
@@ -703,8 +1112,10 @@ static int idt82p33_settime(struct ptp_clock_info *ptp,
return err;
}
-static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
+static int idt82p33_channel_init(struct idt82p33 *idt82p33, u32 index)
{
+ struct idt82p33_channel *channel = &idt82p33->channel[index];
+
switch (index) {
case 0:
channel->dpll_tod_cnfg = DPLL1_TOD_CNFG;
@@ -730,22 +1141,60 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
return -EINVAL;
}
- channel->current_freq_ppb = 0;
+ channel->plln = index;
+ channel->current_freq = 0;
+ channel->idt82p33 = idt82p33;
+ INIT_DELAYED_WORK(&channel->adjtime_work, idt82p33_adjtime_workaround);
return 0;
}
-static void idt82p33_caps_init(struct ptp_clock_info *caps)
+static int idt82p33_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void idt82p33_caps_init(u32 index, struct ptp_clock_info *caps,
+ struct ptp_pin_desc *pin_cfg, u8 max_pins)
+{
+ struct ptp_pin_desc *ppd;
+ int i;
+
caps->owner = THIS_MODULE;
caps->max_adj = DCO_MAX_PPB;
- caps->n_per_out = 11;
- caps->adjphase = idt82p33_adjwritephase;
+ caps->n_per_out = MAX_PER_OUT;
+ caps->n_ext_ts = MAX_PHC_PLL;
+ caps->n_pins = max_pins;
+ caps->adjphase = idt82p33_adjwritephase;
caps->adjfine = idt82p33_adjfine;
caps->adjtime = idt82p33_adjtime;
caps->gettime64 = idt82p33_gettime;
caps->settime64 = idt82p33_settime;
caps->enable = idt82p33_enable;
+ caps->verify = idt82p33_verify_pin;
+ caps->do_aux_work = idt82p33_work_handler;
+
+ snprintf(caps->name, sizeof(caps->name), "IDT 82P33 PLL%u", index);
+
+ caps->pin_config = pin_cfg;
+
+ for (i = 0; i < max_pins; ++i) {
+ ppd = &pin_cfg[i];
+
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ ppd->chan = index;
+ snprintf(ppd->name, sizeof(ppd->name), "in%d", 12 + i);
+ }
}
static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
@@ -758,7 +1207,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
channel = &idt82p33->channel[index];
- err = idt82p33_channel_init(channel, index);
+ err = idt82p33_channel_init(idt82p33, index);
if (err) {
dev_err(idt82p33->dev,
"Channel_init failed in %s with err %d!\n",
@@ -766,11 +1215,8 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
return err;
}
- channel->idt82p33 = idt82p33;
-
- idt82p33_caps_init(&channel->caps);
- snprintf(channel->caps.name, sizeof(channel->caps.name),
- "IDT 82P33 PLL%u", index);
+ idt82p33_caps_init(index, &channel->caps,
+ pin_config[index], MAX_TRIG_CLK);
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
@@ -805,17 +1251,46 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
return 0;
}
+static int idt82p33_reset(struct idt82p33 *idt82p33, bool cold)
+{
+ int err;
+ u8 cfg = SOFT_RESET_EN;
+
+ if (cold == true)
+ goto cold_reset;
+
+ err = idt82p33_read(idt82p33, REG_SOFT_RESET, &cfg, sizeof(cfg));
+ if (err) {
+ dev_err(idt82p33->dev,
+ "Soft reset failed with err %d!\n", err);
+ return err;
+ }
+
+ cfg |= SOFT_RESET_EN;
+
+cold_reset:
+ err = idt82p33_write(idt82p33, REG_SOFT_RESET, &cfg, sizeof(cfg));
+ if (err)
+ dev_err(idt82p33->dev,
+ "Cold reset failed with err %d!\n", err);
+ return err;
+}
+
static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
{
+ char fname[128] = FW_FILENAME;
const struct firmware *fw;
struct idt82p33_fwrc *rec;
u8 loaddr, page, val;
int err;
s32 len;
- dev_dbg(idt82p33->dev, "requesting firmware '%s'\n", FW_FILENAME);
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
- err = request_firmware(&fw, FW_FILENAME, idt82p33->dev);
+ dev_info(idt82p33->dev, "requesting firmware '%s'\n", fname);
+
+ err = request_firmware(&fw, fname, idt82p33->dev);
if (err) {
dev_err(idt82p33->dev,
@@ -863,6 +1338,46 @@ out:
return err;
}
+static void idt82p33_extts_check(struct work_struct *work)
+{
+ struct idt82p33 *idt82p33 = container_of(work, struct idt82p33,
+ extts_work.work);
+ struct idt82p33_channel *channel;
+ int err;
+ u8 mask;
+ int i;
+
+ if (idt82p33->extts_mask == 0)
+ return;
+
+ mutex_lock(idt82p33->lock);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if ((idt82p33->extts_mask & mask) == 0)
+ continue;
+
+ err = idt82p33_extts_check_channel(idt82p33, i);
+
+ if (err == 0) {
+ /* trigger clears itself, so clear the mask */
+ if (idt82p33->extts_single_shot) {
+ idt82p33->extts_mask &= ~mask;
+ } else {
+ /* Re-arm */
+ channel = &idt82p33->channel[i];
+ arm_tod_read_with_trigger(channel, channel->tod_trigger);
+ }
+ }
+ }
+
+ if (idt82p33->extts_mask)
+ schedule_delayed_work(&idt82p33->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+
+ mutex_unlock(idt82p33->lock);
+}
static int idt82p33_probe(struct platform_device *pdev)
{
@@ -885,25 +1400,33 @@ static int idt82p33_probe(struct platform_device *pdev)
idt82p33->pll_mask = DEFAULT_PLL_MASK;
idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+ idt82p33->extts_mask = 0;
+ INIT_DELAYED_WORK(&idt82p33->extts_work, idt82p33_extts_check);
mutex_lock(idt82p33->lock);
- err = idt82p33_load_firmware(idt82p33);
+ /* cold reset before loading firmware */
+ idt82p33_reset(idt82p33, true);
+ err = idt82p33_load_firmware(idt82p33);
if (err)
dev_warn(idt82p33->dev,
"loading firmware failed with %d\n", err);
+ /* soft reset after loading firmware */
+ idt82p33_reset(idt82p33, false);
+
if (idt82p33->pll_mask) {
for (i = 0; i < MAX_PHC_PLL; i++) {
- if (idt82p33->pll_mask & (1 << i)) {
+ if (idt82p33->pll_mask & (1 << i))
err = idt82p33_enable_channel(idt82p33, i);
- if (err) {
- dev_err(idt82p33->dev,
- "Failed in %s with err %d!\n",
- __func__, err);
- break;
- }
+ else
+ err = idt82p33_channel_init(idt82p33, i);
+ if (err) {
+ dev_err(idt82p33->dev,
+ "Failed in %s with err %d!\n",
+ __func__, err);
+ break;
}
}
} else {
@@ -928,6 +1451,8 @@ static int idt82p33_remove(struct platform_device *pdev)
{
struct idt82p33 *idt82p33 = platform_get_drvdata(pdev);
+ cancel_delayed_work_sync(&idt82p33->extts_work);
+
idt82p33_ptp_clock_unregister_all(idt82p33);
return 0;
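
The idt82p33 changes above hand the DDCO stop off to the PTP core's aux worker: idt82p33_work_handler() is wired in through caps->do_aux_work and returns a negative value so it is not rescheduled. As a rough sketch of that mechanism (ptp_schedule_worker() is the real kernel API; the demo_ function and delay names are illustrative, and the driver's own scheduling path is not part of this hunk):

#include <linux/jiffies.h>
#include <linux/ptp_clock_kernel.h>

/* Ask the PTP core to run caps->do_aux_work after delay_ms milliseconds. */
static void demo_kick_aux_work(struct ptp_clock *clock, unsigned int delay_ms)
{
	ptp_schedule_worker(clock, msecs_to_jiffies(delay_ms));
}

/*
 * Inside the do_aux_work callback, a return value >= 0 is a delay in
 * jiffies after which the core re-runs the callback; a negative return
 * (as in idt82p33_work_handler above) means "do not reschedule".
 */
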
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
index 0ea1c35c0f9f..8fcb0b17d207 100644
--- a/drivers/ptp/ptp_idt82p33.h
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -13,6 +13,8 @@
#define FW_FILENAME "idt82p33xxx.bin"
#define MAX_PHC_PLL (2)
+#define MAX_TRIG_CLK (3)
+#define MAX_PER_OUT (11)
#define TOD_BYTE_COUNT (10)
#define DCO_MAX_PPB (92000)
#define MAX_MEASURMENT_COUNT (5)
@@ -20,7 +22,6 @@
#define IMMEDIATE_SNAP_THRESHOLD_NS (50000)
#define DDCO_THRESHOLD_NS (5)
#define IDT82P33_MAX_WRITE_COUNT (512)
-#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
#define PLLMASK_ADDR_HI 0xFF
#define PLLMASK_ADDR_LO 0xA5
@@ -60,8 +61,18 @@ struct idt82p33_channel {
struct ptp_clock *ptp_clock;
struct idt82p33 *idt82p33;
enum pll_mode pll_mode;
- s32 current_freq_ppb;
+ /* Workaround for TOD-to-output alignment issue */
+ struct delayed_work adjtime_work;
+ s32 current_freq;
+ /* double dco mode */
+ bool ddco;
u8 output_mask;
+ /* last input trigger for extts */
+ u8 tod_trigger;
+ bool discard_next_extts;
+ u8 plln;
+ /* remember last tod_sts for extts */
+ u8 extts_tod_sts[TOD_BYTE_COUNT];
u16 dpll_tod_cnfg;
u16 dpll_tod_trigger;
u16 dpll_tod_sts;
@@ -76,6 +87,12 @@ struct idt82p33 {
struct idt82p33_channel channel[MAX_PHC_PLL];
struct device *dev;
u8 pll_mask;
+ /* Polls for external time stamps */
+ u8 extts_mask;
+ bool extts_single_shot;
+ struct delayed_work extts_work;
+ /* Remember the ptp channel to report extts */
+ struct idt82p33_channel *event_channel[MAX_PHC_PLL];
/* Mutex to protect operations from being interrupted */
struct mutex *lock;
struct regmap *regmap;
diff --git a/drivers/ptp/ptp_kvm_common.c b/drivers/ptp/ptp_kvm_common.c
index fcae32f56f25..9141162c4237 100644
--- a/drivers/ptp/ptp_kvm_common.c
+++ b/drivers/ptp/ptp_kvm_common.c
@@ -66,7 +66,7 @@ static int ptp_kvm_getcrosststamp(struct ptp_clock_info *ptp,
* PTP clock operations
*/
-static int ptp_kvm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int ptp_kvm_adjfine(struct ptp_clock_info *ptp, long delta)
{
return -EOPNOTSUPP;
}
@@ -115,7 +115,7 @@ static const struct ptp_clock_info ptp_kvm_caps = {
.n_ext_ts = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = ptp_kvm_adjfreq,
+ .adjfine = ptp_kvm_adjfine,
.adjtime = ptp_kvm_adjtime,
.gettime64 = ptp_kvm_gettime,
.settime64 = ptp_kvm_settime,
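
ptp_kvm is one of several drivers in this series converted from the old .adjfreq callback (plain parts per billion) to .adjfine, whose argument is "scaled ppm": parts per million carrying a 16-bit binary fraction. A minimal sketch of what a non-stub driver would do with that argument, using the existing scaled_ppm_to_ppb() helper (the demo_ name is made up):

#include <linux/printk.h>
#include <linux/ptp_clock_kernel.h>

static int demo_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	/* 1 ppm == 65536 scaled ppm, i.e. roughly 65.536 scaled ppm per ppb. */
	long ppb = scaled_ppm_to_ppb(scaled_ppm);

	pr_debug("requested frequency offset: %ld ppb\n", ppb);

	/* a real driver would program the offset into hardware here */
	return 0;
}
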
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index a48d9b7d2921..4bbaccd543ad 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -13,9 +13,11 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-xiic.h>
+#include <linux/platform_data/i2c-ocores.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>
+#include <linux/spi/altera.h>
#include <net/devlink.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
@@ -28,6 +30,9 @@
#define PCI_VENDOR_ID_CELESTICA 0x18d4
#define PCI_DEVICE_ID_CELESTICA_TIMECARD 0x1008
+#define PCI_VENDOR_ID_OROLIA 0x1ad7
+#define PCI_DEVICE_ID_OROLIA_ARTCARD 0xa000
+
static struct class timecard_class = {
.owner = THIS_MODULE,
.name = "timecard",
@@ -203,6 +208,11 @@ struct frequency_reg {
u32 ctrl;
u32 status;
};
+
+struct board_config_reg {
+ u32 mro50_serial_activate;
+};
+
#define FREQ_STATUS_VALID BIT(31)
#define FREQ_STATUS_ERROR BIT(30)
#define FREQ_STATUS_OVERRUN BIT(29)
@@ -278,6 +288,11 @@ struct ptp_ocp_signal {
bool running;
};
+struct ptp_ocp_serial_port {
+ int line;
+ int baud;
+};
+
#define OCP_BOARD_ID_LEN 13
#define OCP_SERIAL_LEN 6
@@ -289,6 +304,7 @@ struct ptp_ocp {
struct tod_reg __iomem *tod;
struct pps_reg __iomem *pps_to_ext;
struct pps_reg __iomem *pps_to_clk;
+ struct board_config_reg __iomem *board_config;
struct gpio_reg __iomem *pps_select;
struct gpio_reg __iomem *sma_map1;
struct gpio_reg __iomem *sma_map2;
@@ -305,6 +321,7 @@ struct ptp_ocp {
struct ptp_ocp_ext_src *ts2;
struct ptp_ocp_ext_src *ts3;
struct ptp_ocp_ext_src *ts4;
+ struct ocp_art_gpio_reg __iomem *art_sma;
struct img_reg __iomem *image;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
@@ -318,10 +335,10 @@ struct ptp_ocp {
time64_t gnss_lost;
int id;
int n_irqs;
- int gnss_port;
- int gnss2_port;
- int mac_port; /* miniature atomic clock */
- int nmea_port;
+ struct ptp_ocp_serial_port gnss_port;
+ struct ptp_ocp_serial_port gnss2_port;
+ struct ptp_ocp_serial_port mac_port; /* miniature atomic clock */
+ struct ptp_ocp_serial_port nmea_port;
bool fw_loader;
u8 fw_tag;
u16 fw_version;
@@ -365,8 +382,12 @@ static int ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
static int ptp_ocp_signal_enable(void *priv, u32 req, bool enable);
static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
+static int ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+
static const struct ocp_attr_group fb_timecard_groups[];
+static const struct ocp_attr_group art_timecard_groups[];
+
struct ptp_ocp_eeprom_map {
u16 off;
u16 len;
@@ -389,6 +410,12 @@ static struct ptp_ocp_eeprom_map fb_eeprom_map[] = {
{ }
};
+static struct ptp_ocp_eeprom_map art_eeprom_map[] = {
+ { EEPROM_ENTRY(0x200 + 0x43, board_id) },
+ { EEPROM_ENTRY(0x200 + 0x63, serial) },
+ { }
+};
+
#define bp_assign_entry(bp, res, val) ({ \
uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \
*(typeof(val) *)addr = val; \
@@ -430,6 +457,13 @@ static struct ptp_ocp_eeprom_map fb_eeprom_map[] = {
* 14: Signal Generator 4
* 15: TS3
* 16: TS4
+ --
+ * 8: Orolia TS1
+ * 10: Orolia TS2
+ * 11: Orolia TS0 (GNSS)
+ * 12: Orolia PPS
+ * 14: Orolia TS3
+ * 15: Orolia TS4
*/
static struct ocp_resource ocp_fb_resource[] = {
@@ -596,14 +630,23 @@ static struct ocp_resource ocp_fb_resource[] = {
{
OCP_SERIAL_RESOURCE(gnss_port),
.offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
},
{
OCP_SERIAL_RESOURCE(gnss2_port),
.offset = 0x00170000 + 0x1000, .irq_vec = 4,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
},
{
OCP_SERIAL_RESOURCE(mac_port),
.offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 57600,
+ },
},
{
OCP_SERIAL_RESOURCE(nmea_port),
@@ -647,9 +690,141 @@ static struct ocp_resource ocp_fb_resource[] = {
{ }
};
+#define OCP_ART_CONFIG_SIZE 144
+#define OCP_ART_TEMP_TABLE_SIZE 368
+
+struct ocp_art_gpio_reg {
+ struct {
+ u32 gpio;
+ u32 __pad[3];
+ } map[4];
+};
+
+static struct ocp_resource ocp_art_resource[] = {
+ {
+ OCP_MEM_RESOURCE(reg),
+ .offset = 0x01000000, .size = 0x10000,
+ },
+ {
+ OCP_SERIAL_RESOURCE(gnss_port),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(art_sma),
+ .offset = 0x003C0000, .size = 0x1000,
+ },
+ /* Timestamp associated with GNSS1 receiver PPS */
+ {
+ OCP_EXT_RESOURCE(ts0),
+ .offset = 0x360000, .size = 0x20, .irq_vec = 12,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 0,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts1),
+ .offset = 0x380000, .size = 0x20, .irq_vec = 8,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts2),
+ .offset = 0x390000, .size = 0x20, .irq_vec = 10,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts3),
+ .offset = 0x3A0000, .size = 0x20, .irq_vec = 14,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 3,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts4),
+ .offset = 0x3B0000, .size = 0x20, .irq_vec = 15,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 4,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ /* Timestamp associated with Internal PPS of the card */
+ {
+ OCP_EXT_RESOURCE(pps),
+ .offset = 0x00330000, .size = 0x20, .irq_vec = 11,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 5,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_SPI_RESOURCE(spi_flash),
+ .offset = 0x00310000, .size = 0x10000, .irq_vec = 9,
+ .extra = &(struct ptp_ocp_flash_info) {
+ .name = "spi_altera", .pci_offset = 0,
+ .data_size = sizeof(struct altera_spi_platform_data),
+ .data = &(struct altera_spi_platform_data) {
+ .num_chipselect = 1,
+ .num_devices = 1,
+ .devices = &(struct spi_board_info) {
+ .modalias = "spi-nor",
+ },
+ },
+ },
+ },
+ {
+ OCP_I2C_RESOURCE(i2c_ctrl),
+ .offset = 0x350000, .size = 0x100, .irq_vec = 4,
+ .extra = &(struct ptp_ocp_i2c_info) {
+ .name = "ocores-i2c",
+ .fixed_rate = 400000,
+ .data_size = sizeof(struct ocores_i2c_platform_data),
+ .data = &(struct ocores_i2c_platform_data) {
+ .clock_khz = 125000,
+ .bus_khz = 400,
+ .num_devices = 1,
+ .devices = &(struct i2c_board_info) {
+ I2C_BOARD_INFO("24c08", 0x50),
+ },
+ },
+ },
+ },
+ {
+ OCP_SERIAL_RESOURCE(mac_port),
+ .offset = 0x00190000, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 9600,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(board_config),
+ .offset = 0x210000, .size = 0x1000,
+ },
+ {
+ .setup = ptp_ocp_art_board_init,
+ },
+ { }
+};
+
static const struct pci_device_id ptp_ocp_pcidev_id[] = {
{ PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) },
+ { PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) },
{ }
};
MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
@@ -714,6 +889,19 @@ static const struct ocp_selector ptp_ocp_sma_out[] = {
{ }
};
+static const struct ocp_selector ptp_ocp_art_sma_in[] = {
+ { .name = "PPS1", .value = 0x0001 },
+ { .name = "10Mhz", .value = 0x0008 },
+ { }
+};
+
+static const struct ocp_selector ptp_ocp_art_sma_out[] = {
+ { .name = "PHC", .value = 0x0002 },
+ { .name = "GNSS", .value = 0x0004 },
+ { .name = "10Mhz", .value = 0x0010 },
+ { }
+};
+
struct ocp_sma_op {
const struct ocp_selector *tbl[2];
void (*init)(struct ptp_ocp *bp);
@@ -1342,11 +1530,9 @@ ptp_ocp_devlink_fw_image(struct devlink *devlink, const struct firmware *fw,
hdr = (const struct ptp_ocp_firmware_header *)fw->data;
if (memcmp(hdr->magic, OCP_FIRMWARE_MAGIC_HEADER, 4)) {
devlink_flash_update_status_notify(devlink,
- "No firmware header found, flashing raw image",
+ "No firmware header found, cancel firmware upgrade",
NULL, 0, 0);
- offset = 0;
- length = fw->size;
- goto out;
+ return -EINVAL;
}
if (be16_to_cpu(hdr->pci_vendor_id) != bp->pdev->vendor ||
@@ -1374,7 +1560,6 @@ ptp_ocp_devlink_fw_image(struct devlink *devlink, const struct firmware *fw,
return -EINVAL;
}
-out:
*data = &fw->data[offset];
*size = length;
@@ -1462,10 +1647,6 @@ ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
char buf[32];
int err;
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
-
fw_image = bp->fw_loader ? "loader" : "fw";
sprintf(buf, "%d.%d", bp->fw_tag, bp->fw_version);
err = devlink_info_version_running_put(req, fw_image, buf);
@@ -1872,11 +2053,15 @@ ptp_ocp_serial_line(struct ptp_ocp *bp, struct ocp_resource *r)
static int
ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r)
{
- int port;
+ struct ptp_ocp_serial_port *p = (struct ptp_ocp_serial_port *)r->extra;
+ struct ptp_ocp_serial_port port = {};
+
+ port.line = ptp_ocp_serial_line(bp, r);
+ if (port.line < 0)
+ return port.line;
- port = ptp_ocp_serial_line(bp, r);
- if (port < 0)
- return port;
+ if (p)
+ port.baud = p->baud;
bp_assign_entry(bp, r, port);
@@ -2257,6 +2442,121 @@ ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
return err;
}
+static void
+ptp_ocp_art_sma_init(struct ptp_ocp *bp)
+{
+ u32 reg;
+ int i;
+
+ /* defaults */
+ bp->sma[0].mode = SMA_MODE_IN;
+ bp->sma[1].mode = SMA_MODE_IN;
+ bp->sma[2].mode = SMA_MODE_OUT;
+ bp->sma[3].mode = SMA_MODE_OUT;
+
+ bp->sma[0].default_fcn = 0x08; /* IN: 10Mhz */
+ bp->sma[1].default_fcn = 0x01; /* IN: PPS1 */
+ bp->sma[2].default_fcn = 0x10; /* OUT: 10Mhz */
+ bp->sma[3].default_fcn = 0x02; /* OUT: PHC */
+
+ /* If no SMA map, the pin functions and directions are fixed. */
+ if (!bp->art_sma) {
+ for (i = 0; i < 4; i++) {
+ bp->sma[i].fixed_fcn = true;
+ bp->sma[i].fixed_dir = true;
+ }
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg = ioread32(&bp->art_sma->map[i].gpio);
+
+ switch (reg & 0xff) {
+ case 0:
+ bp->sma[i].fixed_fcn = true;
+ bp->sma[i].fixed_dir = true;
+ break;
+ case 1:
+ case 8:
+ bp->sma[i].mode = SMA_MODE_IN;
+ break;
+ default:
+ bp->sma[i].mode = SMA_MODE_OUT;
+ break;
+ }
+ }
+}
+
+static u32
+ptp_ocp_art_sma_get(struct ptp_ocp *bp, int sma_nr)
+{
+ if (bp->sma[sma_nr - 1].fixed_fcn)
+ return bp->sma[sma_nr - 1].default_fcn;
+
+ return ioread32(&bp->art_sma->map[sma_nr - 1].gpio) & 0xff;
+}
+
+/* note: store 0 is considered invalid. */
+static int
+ptp_ocp_art_sma_set(struct ptp_ocp *bp, int sma_nr, u32 val)
+{
+ unsigned long flags;
+ u32 __iomem *gpio;
+ int err = 0;
+ u32 reg;
+
+ val &= SMA_SELECT_MASK;
+ if (hweight32(val) > 1)
+ return -EINVAL;
+
+ gpio = &bp->art_sma->map[sma_nr - 1].gpio;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ reg = ioread32(gpio);
+ if (((reg >> 16) & val) == 0) {
+ err = -EOPNOTSUPP;
+ } else {
+ reg = (reg & 0xff00) | (val & 0xff);
+ iowrite32(reg, gpio);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return err;
+}
+
+static const struct ocp_sma_op ocp_art_sma_op = {
+ .tbl = { ptp_ocp_art_sma_in, ptp_ocp_art_sma_out },
+ .init = ptp_ocp_art_sma_init,
+ .get = ptp_ocp_art_sma_get,
+ .set_inputs = ptp_ocp_art_sma_set,
+ .set_output = ptp_ocp_art_sma_set,
+};
+
+/* ART specific board initializers; last "resource" registered. */
+static int
+ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ int err;
+
+ bp->flash_start = 0x1000000;
+ bp->eeprom_map = art_eeprom_map;
+ bp->fw_cap = OCP_CAP_BASIC;
+ bp->fw_version = ioread32(&bp->reg->version);
+ bp->fw_tag = 2;
+ bp->sma_op = &ocp_art_sma_op;
+
+ /* Enable MAC serial port during initialisation */
+ iowrite32(1, &bp->board_config->mro50_serial_activate);
+
+ ptp_ocp_sma_init(bp);
+
+ err = ptp_ocp_attr_group_add(bp, art_timecard_groups);
+ if (err)
+ return err;
+
+ return ptp_ocp_init_clock(bp);
+}
+
static ssize_t
ptp_ocp_show_output(const struct ocp_selector *tbl, u32 val, char *buf,
int def_val)
@@ -3030,6 +3330,130 @@ DEVICE_FREQ_GROUP(freq2, 1);
DEVICE_FREQ_GROUP(freq3, 2);
DEVICE_FREQ_GROUP(freq4, 3);
+static ssize_t
+disciplining_config_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
+ size_t size = OCP_ART_CONFIG_SIZE;
+ struct nvmem_device *nvmem;
+ ssize_t err;
+
+ nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ if (off > size) {
+ err = 0;
+ goto out;
+ }
+
+ if (off + count > size)
+ count = size - off;
+
+ // the configuration is in the very beginning of the EEPROM
+ err = nvmem_device_read(nvmem, off, count, buf);
+ if (err != count) {
+ err = -EFAULT;
+ goto out;
+ }
+
+out:
+ ptp_ocp_nvmem_device_put(&nvmem);
+
+ return err;
+}
+
+static ssize_t
+disciplining_config_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
+ struct nvmem_device *nvmem;
+ ssize_t err;
+
+ /* Allow write of the whole area only */
+ if (off || count != OCP_ART_CONFIG_SIZE)
+ return -EFAULT;
+
+ nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ err = nvmem_device_write(nvmem, 0x00, count, buf);
+ if (err != count)
+ err = -EFAULT;
+
+ ptp_ocp_nvmem_device_put(&nvmem);
+
+ return err;
+}
+static BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
+
+static ssize_t
+temperature_table_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
+ size_t size = OCP_ART_TEMP_TABLE_SIZE;
+ struct nvmem_device *nvmem;
+ ssize_t err;
+
+ nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ if (off > size) {
+ err = 0;
+ goto out;
+ }
+
+ if (off + count > size)
+ count = size - off;
+
+ // the temperature table is stored at offset 0x90 in the EEPROM

+ err = nvmem_device_read(nvmem, 0x90 + off, count, buf);
+ if (err != count) {
+ err = -EFAULT;
+ goto out;
+ }
+
+out:
+ ptp_ocp_nvmem_device_put(&nvmem);
+
+ return err;
+}
+
+static ssize_t
+temperature_table_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
+ struct nvmem_device *nvmem;
+ ssize_t err;
+
+ /* Allow write of the whole area only */
+ if (off || count != OCP_ART_TEMP_TABLE_SIZE)
+ return -EFAULT;
+
+ nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ err = nvmem_device_write(nvmem, 0x90, count, buf);
+ if (err != count)
+ err = -EFAULT;
+
+ ptp_ocp_nvmem_device_put(&nvmem);
+
+ return err;
+}
+static BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
+
static struct attribute *fb_timecard_attrs[] = {
&dev_attr_serialnum.attr,
&dev_attr_gnss_sync.attr,
@@ -3049,9 +3473,11 @@ static struct attribute *fb_timecard_attrs[] = {
&dev_attr_tod_correction.attr,
NULL,
};
+
static const struct attribute_group fb_timecard_group = {
.attrs = fb_timecard_attrs,
};
+
static const struct ocp_attr_group fb_timecard_groups[] = {
{ .cap = OCP_CAP_BASIC, .group = &fb_timecard_group },
{ .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
@@ -3065,6 +3491,37 @@ static const struct ocp_attr_group fb_timecard_groups[] = {
{ },
};
+static struct attribute *art_timecard_attrs[] = {
+ &dev_attr_serialnum.attr,
+ &dev_attr_clock_source.attr,
+ &dev_attr_available_clock_sources.attr,
+ &dev_attr_utc_tai_offset.attr,
+ &dev_attr_ts_window_adjust.attr,
+ &dev_attr_sma1.attr,
+ &dev_attr_sma2.attr,
+ &dev_attr_sma3.attr,
+ &dev_attr_sma4.attr,
+ &dev_attr_available_sma_inputs.attr,
+ &dev_attr_available_sma_outputs.attr,
+ NULL,
+};
+
+static struct bin_attribute *bin_art_timecard_attrs[] = {
+ &bin_attr_disciplining_config,
+ &bin_attr_temperature_table,
+ NULL,
+};
+
+static const struct attribute_group art_timecard_group = {
+ .attrs = art_timecard_attrs,
+ .bin_attrs = bin_art_timecard_attrs,
+};
+
+static const struct ocp_attr_group art_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &art_timecard_group },
+ { },
+};
+
static void
gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
const char *def)
@@ -3177,14 +3634,16 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
bp = dev_get_drvdata(dev);
seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
- if (bp->gnss_port != -1)
- seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1", bp->gnss_port);
- if (bp->gnss2_port != -1)
- seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2", bp->gnss2_port);
- if (bp->mac_port != -1)
- seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port);
- if (bp->nmea_port != -1)
- seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port);
+ if (bp->gnss_port.line != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1",
+ bp->gnss_port.line);
+ if (bp->gnss2_port.line != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2",
+ bp->gnss2_port.line);
+ if (bp->mac_port.line != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port.line);
+ if (bp->nmea_port.line != -1)
+ seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port.line);
memset(sma_val, 0xff, sizeof(sma_val));
if (bp->sma_map1) {
@@ -3508,10 +3967,10 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
bp->ptp_info = ptp_ocp_clock_info;
spin_lock_init(&bp->lock);
- bp->gnss_port = -1;
- bp->gnss2_port = -1;
- bp->mac_port = -1;
- bp->nmea_port = -1;
+ bp->gnss_port.line = -1;
+ bp->gnss2_port.line = -1;
+ bp->mac_port.line = -1;
+ bp->nmea_port.line = -1;
bp->pdev = pdev;
device_initialize(&bp->dev);
@@ -3569,20 +4028,20 @@ ptp_ocp_complete(struct ptp_ocp *bp)
struct pps_device *pps;
char buf[32];
- if (bp->gnss_port != -1) {
- sprintf(buf, "ttyS%d", bp->gnss_port);
+ if (bp->gnss_port.line != -1) {
+ sprintf(buf, "ttyS%d", bp->gnss_port.line);
ptp_ocp_link_child(bp, buf, "ttyGNSS");
}
- if (bp->gnss2_port != -1) {
- sprintf(buf, "ttyS%d", bp->gnss2_port);
+ if (bp->gnss2_port.line != -1) {
+ sprintf(buf, "ttyS%d", bp->gnss2_port.line);
ptp_ocp_link_child(bp, buf, "ttyGNSS2");
}
- if (bp->mac_port != -1) {
- sprintf(buf, "ttyS%d", bp->mac_port);
+ if (bp->mac_port.line != -1) {
+ sprintf(buf, "ttyS%d", bp->mac_port.line);
ptp_ocp_link_child(bp, buf, "ttyMAC");
}
- if (bp->nmea_port != -1) {
- sprintf(buf, "ttyS%d", bp->nmea_port);
+ if (bp->nmea_port.line != -1) {
+ sprintf(buf, "ttyS%d", bp->nmea_port.line);
ptp_ocp_link_child(bp, buf, "ttyNMEA");
}
sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
@@ -3638,16 +4097,20 @@ ptp_ocp_info(struct ptp_ocp *bp)
ptp_ocp_phc_info(bp);
- ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200);
- ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200);
- ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600);
- if (bp->nmea_out && bp->nmea_port != -1) {
- int baud = -1;
+ ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port.line,
+ bp->gnss_port.baud);
+ ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port.line,
+ bp->gnss2_port.baud);
+ ptp_ocp_serial_info(dev, "MAC", bp->mac_port.line, bp->mac_port.baud);
+ if (bp->nmea_out && bp->nmea_port.line != -1) {
+ bp->nmea_port.baud = -1;
reg = ioread32(&bp->nmea_out->uart_baud);
if (reg < ARRAY_SIZE(nmea_baud))
- baud = nmea_baud[reg];
- ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port, baud);
+ bp->nmea_port.baud = nmea_baud[reg];
+
+ ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port.line,
+ bp->nmea_port.baud);
}
}
@@ -3688,14 +4151,14 @@ ptp_ocp_detach(struct ptp_ocp *bp)
for (i = 0; i < 4; i++)
if (bp->signal_out[i])
ptp_ocp_unregister_ext(bp->signal_out[i]);
- if (bp->gnss_port != -1)
- serial8250_unregister_port(bp->gnss_port);
- if (bp->gnss2_port != -1)
- serial8250_unregister_port(bp->gnss2_port);
- if (bp->mac_port != -1)
- serial8250_unregister_port(bp->mac_port);
- if (bp->nmea_port != -1)
- serial8250_unregister_port(bp->nmea_port);
+ if (bp->gnss_port.line != -1)
+ serial8250_unregister_port(bp->gnss_port.line);
+ if (bp->gnss2_port.line != -1)
+ serial8250_unregister_port(bp->gnss2_port.line);
+ if (bp->mac_port.line != -1)
+ serial8250_unregister_port(bp->mac_port.line);
+ if (bp->nmea_port.line != -1)
+ serial8250_unregister_port(bp->nmea_port.line);
platform_device_unregister(bp->spi_flash);
platform_device_unregister(bp->i2c_ctrl);
if (bp->i2c_clk)
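
Several of the ptp_ocp hunks above rely on the bp_assign_entry() pattern quoted in the context lines: each ocp_resource records the byte offset of a struct ptp_ocp member, and the setup helper stores its result through that offset. That is how ptp_ocp_register_serial() now lands a whole struct ptp_ocp_serial_port (line plus baud) instead of a bare line number. A self-contained sketch of the same idea, with made-up demo_ names and the field offset taken with offsetof():

#include <stddef.h>
#include <stdint.h>

struct demo_port { int line; int baud; };
struct demo_bp   { struct demo_port gnss_port; };

/* Write val into the member of bp that lives at byte offset off. */
#define demo_assign(bp, off, val) ({					\
	uintptr_t addr = (uintptr_t)(bp) + (off);			\
	*(typeof(val) *)addr = (val);					\
})

static void demo(struct demo_bp *bp)
{
	struct demo_port port = { .line = 4, .baud = 115200 };

	/* equivalent of bp_assign_entry(bp, r, port) for gnss_port */
	demo_assign(bp, offsetof(struct demo_bp, gnss_port), port);
}
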
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 7d4da9e605ef..33355d5eb033 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -336,24 +336,13 @@ static irqreturn_t isr(int irq, void *priv)
* PTP clock operations
*/
-static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int ptp_pch_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- u64 adj;
- u32 diff, addend;
- int neg_adj = 0;
+ u32 addend;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
struct pch_ts_regs __iomem *regs = pch_dev->regs;
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- addend = DEFAULT_ADDEND;
- adj = addend;
- adj *= ppb;
- diff = div_u64(adj, 1000000000ULL);
-
- addend = neg_adj ? addend - diff : addend + diff;
+ addend = adjust_by_scaled_ppm(DEFAULT_ADDEND, scaled_ppm);
iowrite32(addend, &regs->addend);
@@ -440,7 +429,7 @@ static const struct ptp_clock_info ptp_pch_caps = {
.n_ext_ts = N_EXT_TS,
.n_pins = 0,
.pps = 0,
- .adjfreq = ptp_pch_adjfreq,
+ .adjfine = ptp_pch_adjfine,
.adjtime = ptp_pch_adjtime,
.gettime64 = ptp_pch_gettime,
.settime64 = ptp_pch_settime,
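
The ptp_pch conversion replaces the open-coded ppb arithmetic with adjust_by_scaled_ppm(). Roughly, that helper scales the base addend by the requested scaled-ppm offset; the sketch below shows the arithmetic under that assumption (illustrative only: the in-tree helper uses mul_u64_u64_div_u64() to avoid the overflow this naive version risks for large inputs):

#include <linux/math64.h>
#include <linux/types.h>

/* base * (1 + scaled_ppm / (10^6 * 2^16)), rounded toward zero. */
static u32 demo_adjust_addend(u32 base, long scaled_ppm)
{
	bool neg = scaled_ppm < 0;
	u64 diff;

	if (neg)
		scaled_ppm = -scaled_ppm;

	diff = div_u64((u64)base * scaled_ppm, 1000000ULL << 16);

	return neg ? base - diff : base + diff;
}
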
diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c
index 5dca26e14bdc..d64eec5b1788 100644
--- a/drivers/ptp/ptp_vmw.c
+++ b/drivers/ptp/ptp_vmw.c
@@ -47,7 +47,7 @@ static int ptp_vmw_adjtime(struct ptp_clock_info *info, s64 delta)
return -EOPNOTSUPP;
}
-static int ptp_vmw_adjfreq(struct ptp_clock_info *info, s32 delta)
+static int ptp_vmw_adjfine(struct ptp_clock_info *info, long delta)
{
return -EOPNOTSUPP;
}
@@ -79,7 +79,7 @@ static struct ptp_clock_info ptp_vmw_clock_info = {
.name = "ptp_vmw",
.max_adj = 0,
.adjtime = ptp_vmw_adjtime,
- .adjfreq = ptp_vmw_adjfreq,
+ .adjfine = ptp_vmw_adjfine,
.gettime64 = ptp_vmw_gettime,
.settime64 = ptp_vmw_settime,
.enable = ptp_vmw_enable,
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 37b551bd43bf..bdfab9ea0046 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -825,16 +825,9 @@ done:
/*
* Start transmission of a packet.
* Called from generic network device layer.
- *
- * skb Pointer to buffer containing the packet.
- * dev Pointer to interface struct.
- *
- * returns 0 if packet consumed, !0 if packet rejected.
- * Note: If we return !0, then the packet is free'd by
- * the generic network layer.
*/
/* first merge version - leaving both functions separated */
-static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev)
{
struct ctcm_priv *priv = dev->ml_priv;
@@ -877,7 +870,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
}
/* unmerged MPC variant of ctcm_tx */
-static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
int len = 0;
struct ctcm_priv *priv = dev->ml_priv;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 84c8981317b4..38f312664ce7 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1519,9 +1519,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
/*
* Packet transmit function called by network stack
*/
-static int
-__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+ struct net_device *dev)
{
struct lcs_header *header;
int rc = NETDEV_TX_OK;
@@ -1582,8 +1581,7 @@ out:
return rc;
}
-static int
-lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lcs_card *card;
int rc;
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 65aa0a96c21d..66076cada8ae 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1248,15 +1248,8 @@ static int netiucv_close(struct net_device *dev)
/*
* Start transmission of a packet.
* Called from generic network device layer.
- *
- * @param skb Pointer to buffer containing the packet.
- * @param dev Pointer to interface struct.
- *
- * @return 0 if packet consumed, !0 if packet rejected.
- * Note: If we return !0, then the packet is free'd by
- * the generic network layer.
*/
-static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netiucv_priv *privptr = netdev_priv(dev);
int rc;
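
The ctcm, lcs and netiucv hunks all make the same change: the transmit handlers now return netdev_tx_t instead of int, matching the .ndo_start_xmit prototype. A minimal sketch of the expected shape (demo_ names are illustrative): the handler returns NETDEV_TX_OK once it has taken ownership of the skb, or NETDEV_TX_BUSY to ask the stack to requeue it.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (/* hardware queue full */ false)
		return NETDEV_TX_BUSY;	/* stack will retry the same skb */

	/* hand the skb to the hardware here; the driver now owns it */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_start_xmit	= demo_start_xmit,
};
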
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 56c3cf3ba53d..45e5eccadb44 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1685,6 +1685,7 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static const struct ieee80211_ops vnt_mac_ops = {
.tx = vnt_tx_80211,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
.stop = vnt_stop,
.add_interface = vnt_add_interface,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 897ee0f7fc6b..2abae90f3f52 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -957,6 +957,7 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static const struct ieee80211_ops vnt_mac_ops = {
.tx = vnt_tx_80211,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
.stop = vnt_stop,
.add_interface = vnt_add_interface,